// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 plane module
 *
 * Each DRM plane is a layer of pixels being scanned out by the HVS.
 *
 * At atomic modeset check time, we compute the HVS display element
 * state that would be necessary for displaying the plane (giving us a
 * chance to figure out if a plane configuration is invalid), then at
 * atomic flush time the CRTC will ask us to write our element state
 * into the region of the HVS that it has allocated for us.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>

#include "uapi/drm/vc4_drm.h"

#include "vc4_drv.h"
#include "vc4_regs.h"
static const struct hvs_format {
	u32 drm; /* DRM_FORMAT_* */
	u32 hvs; /* HVS_FORMAT_* */
	u32 pixel_order;
	u32 pixel_order_hvs5;
	bool hvs5_only;
} hvs_formats[] = {
	{
		.drm = DRM_FORMAT_XRGB8888,
		.hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_ARGB8888,
		.hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_ABGR8888,
		.hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_XBGR8888,
		.hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_RGB565,
		.hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR565,
		.hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_ARGB1555,
		.hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_XRGB1555,
		.hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_RGB888,
		.hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR888,
		.hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_YUV422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_YUV420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV12,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV21,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV16,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV61,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_P030,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_XRGB2101010,
		.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_ARGB2101010,
		.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_ABGR2101010,
		.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_XBGR2101010,
		.hvs = HVS_PIXEL_FORMAT_RGBA1010102,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
		.hvs5_only = true,
	},
	{
		.drm = DRM_FORMAT_RGB332,
		.hvs = HVS_PIXEL_FORMAT_RGB332,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_BGR233,
		.hvs = HVS_PIXEL_FORMAT_RGB332,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_XRGB4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_ARGB4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_XBGR4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_ABGR4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_BGRX4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_RGBA,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
	},
	{
		.drm = DRM_FORMAT_BGRA4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_RGBA,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
	},
	{
		.drm = DRM_FORMAT_RGBX4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_BGRA,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
	},
	{
		.drm = DRM_FORMAT_RGBA4444,
		.hvs = HVS_PIXEL_FORMAT_RGBA4444,
		.pixel_order = HVS_PIXEL_ORDER_BGRA,
		.pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
	},
};

static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		if (hvs_formats[i].drm == drm_format)
			return &hvs_formats[i];
	}

	return NULL;
}

static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
	if (dst == src >> 16)
		return VC4_SCALING_NONE;
	if (3 * dst >= 2 * (src >> 16))
		return VC4_SCALING_PPF;
	else
		return VC4_SCALING_TPZ;
}
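
/* vc4_get_scaling_mode() compares a 16.16 fixed-point source size with an
 * integer destination size: identical whole-pixel sizes need no scaling,
 * ratios down to 2/3 use the polyphase filter (PPF), and anything smaller
 * falls back to the trapezoidal (TPZ) scaler. For example, 1920 -> 1280
 * picks PPF (3 * 1280 >= 2 * 1920), while 1920 -> 1279 picks TPZ.
 */
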
static bool plane_enabled(struct drm_plane_state *state)
{
	return state->fb && !WARN_ON(!state->crtc);
}
265 static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
267 struct vc4_plane_state *vc4_state;
269 if (WARN_ON(!plane->state))
272 vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
276 memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
277 vc4_state->dlist_initialized = 0;
279 __drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
281 if (vc4_state->dlist) {
282 vc4_state->dlist = kmemdup(vc4_state->dlist,
283 vc4_state->dlist_count * 4,
285 if (!vc4_state->dlist) {
289 vc4_state->dlist_size = vc4_state->dlist_count;
292 return &vc4_state->base;
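
/* Note: vc4_plane_duplicate_state() deep-copies the dlist array (via the
 * kmemdup() above), so the duplicated state can grow or rewrite its display
 * list without disturbing the state still committed to hardware.
 */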
295 static void vc4_plane_destroy_state(struct drm_plane *plane,
296 struct drm_plane_state *state)
298 struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
299 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
301 if (drm_mm_node_allocated(&vc4_state->lbm)) {
302 unsigned long irqflags;
304 spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
305 drm_mm_remove_node(&vc4_state->lbm);
306 spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
309 kfree(vc4_state->dlist);
310 __drm_atomic_helper_plane_destroy_state(&vc4_state->base);
314 /* Called during init to allocate the plane's atomic state. */
315 static void vc4_plane_reset(struct drm_plane *plane)
317 struct vc4_plane_state *vc4_state;
319 WARN_ON(plane->state);
321 vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
325 __drm_atomic_helper_plane_reset(plane, &vc4_state->base);

static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
	if (vc4_state->dlist_count == vc4_state->dlist_size) {
		u32 new_size = max(4u, vc4_state->dlist_count * 2);
		u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);

		if (!new_dlist)
			return;

		memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);
		kfree(vc4_state->dlist);
		vc4_state->dlist = new_dlist;
		vc4_state->dlist_size = new_size;
	}

	vc4_state->dlist_count++;
}
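
/* The dlist array above grows by doubling (with a floor of four entries)
 * whenever it fills up, so repeated vc4_dlist_write() calls stay amortized
 * O(1) per written word.
 */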

static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
	unsigned int idx = vc4_state->dlist_count;

	vc4_dlist_counter_increment(vc4_state);
	vc4_state->dlist[idx] = val;
}
354 /* Returns the scl0/scl1 field based on whether the dimensions need to
355 * be up/down/non-scaled.
357 * This is a replication of a table from the spec.
359 static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
361 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
363 switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
364 case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
365 return SCALER_CTL0_SCL_H_PPF_V_PPF;
366 case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
367 return SCALER_CTL0_SCL_H_TPZ_V_PPF;
368 case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
369 return SCALER_CTL0_SCL_H_PPF_V_TPZ;
370 case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
371 return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
372 case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
373 return SCALER_CTL0_SCL_H_PPF_V_NONE;
374 case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
375 return SCALER_CTL0_SCL_H_NONE_V_PPF;
376 case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
377 return SCALER_CTL0_SCL_H_NONE_V_TPZ;
378 case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
379 return SCALER_CTL0_SCL_H_TPZ_V_NONE;
381 case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
		/* The unity case is independently handled by
		 * SCALER_CTL0_UNITY.
		 */
389 static int vc4_plane_margins_adj(struct drm_plane_state *pstate)
391 struct vc4_plane_state *vc4_pstate = to_vc4_plane_state(pstate);
392 unsigned int left, right, top, bottom, adjhdisplay, adjvdisplay;
393 struct drm_crtc_state *crtc_state;
395 crtc_state = drm_atomic_get_new_crtc_state(pstate->state,
398 vc4_crtc_get_margins(crtc_state, &left, &right, &top, &bottom);
399 if (!left && !right && !top && !bottom)
402 if (left + right >= crtc_state->mode.hdisplay ||
403 top + bottom >= crtc_state->mode.vdisplay)
406 adjhdisplay = crtc_state->mode.hdisplay - (left + right);
407 vc4_pstate->crtc_x = DIV_ROUND_CLOSEST(vc4_pstate->crtc_x *
409 crtc_state->mode.hdisplay);
410 vc4_pstate->crtc_x += left;
411 if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - right)
412 vc4_pstate->crtc_x = crtc_state->mode.hdisplay - right;
414 adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
415 vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
417 crtc_state->mode.vdisplay);
418 vc4_pstate->crtc_y += top;
419 if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - bottom)
420 vc4_pstate->crtc_y = crtc_state->mode.vdisplay - bottom;
422 vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
424 crtc_state->mode.hdisplay);
425 vc4_pstate->crtc_h = DIV_ROUND_CLOSEST(vc4_pstate->crtc_h *
427 crtc_state->mode.vdisplay);
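
	/* Worked example: with mode.hdisplay = 1920 and 32-pixel left/right
	 * margins, adjhdisplay = 1856, so a plane at crtc_x = 960 becomes
	 * DIV_ROUND_CLOSEST(960 * 1856, 1920) + 32 = 928 + 32 = 960; positions
	 * and sizes are rescaled proportionally into the margin-adjusted
	 * active area.
	 */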
429 if (!vc4_pstate->crtc_w || !vc4_pstate->crtc_h)
435 static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
437 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
438 struct drm_framebuffer *fb = state->fb;
439 struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
440 int num_planes = fb->format->num_planes;
441 struct drm_crtc_state *crtc_state;
442 u32 h_subsample = fb->format->hsub;
443 u32 v_subsample = fb->format->vsub;
446 crtc_state = drm_atomic_get_existing_crtc_state(state->state,
449 DRM_DEBUG_KMS("Invalid crtc state\n");
453 ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
454 INT_MAX, true, true);
458 for (i = 0; i < num_planes; i++)
459 vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
461 vc4_state->src_x = state->src.x1;
462 vc4_state->src_y = state->src.y1;
463 vc4_state->src_w[0] = state->src.x2 - vc4_state->src_x;
464 vc4_state->src_h[0] = state->src.y2 - vc4_state->src_y;
466 vc4_state->crtc_x = state->dst.x1;
467 vc4_state->crtc_y = state->dst.y1;
468 vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
469 vc4_state->crtc_h = state->dst.y2 - state->dst.y1;
471 ret = vc4_plane_margins_adj(state);
475 vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
477 vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
480 vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
481 vc4_state->y_scaling[0] == VC4_SCALING_NONE);
483 if (num_planes > 1) {
484 vc4_state->is_yuv = true;
486 vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
487 vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;
489 vc4_state->x_scaling[1] =
490 vc4_get_scaling_mode(vc4_state->src_w[1],
492 vc4_state->y_scaling[1] =
493 vc4_get_scaling_mode(vc4_state->src_h[1],
		/* YUV conversion requires that horizontal scaling be enabled
		 * on the UV plane even if vc4_get_scaling_mode() returned
		 * VC4_SCALING_NONE (which can happen when the down-scaling
		 * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
		 * case.
		 */
		if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
			vc4_state->x_scaling[1] = VC4_SCALING_PPF;
505 vc4_state->is_yuv = false;
506 vc4_state->x_scaling[1] = VC4_SCALING_NONE;
507 vc4_state->y_scaling[1] = VC4_SCALING_NONE;
513 static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
519 /* The specs note that while the reciprocal would be defined
520 * as (1<<32)/scale, ~0 is close enough.
524 vc4_dlist_write(vc4_state,
525 VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
526 VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
527 vc4_dlist_write(vc4_state,
528 VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
531 /* phase magnitude bits */
534 static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst, u32 xy, int channel, int chroma_offset)
536 u32 scale = src / dst;
	/* Start the phase at 1/2 pixel from the 1st pixel at src_x,
	 * 1/4 pixel for YUV, plus the offset for chroma siting.
	 */
543 /* the phase is relative to scale_src->x, so shift it for display list's x value */
544 offset = (xy & 0x1ffff) >> (16 - PHASE_BITS) >> 1;
545 offset -= chroma_offset >> (17 - PHASE_BITS);
546 offset += -(1 << PHASE_BITS >> 2);
548 /* the phase is relative to scale_src->x, so shift it for display list's x value */
549 offset = (xy & 0xffff) >> (16 - PHASE_BITS);
550 offset += -(1 << PHASE_BITS >> 1);
	/* This is a kludge to make sure the scaling factors are consistent
	 * with YUV's luma scaling. We lose 1 bit of precision because of this.
	 */
	/* There may also be a small error introduced by the precision of
	 * scale. Add half of that as a compromise.
	 */
559 offset2 = src - dst * scale;
560 offset2 >>= 16 - PHASE_BITS;
561 phase = offset + (offset2 >> 1);
563 /* Ensure +ve values don't touch the sign bit, then truncate negative values */
564 if (phase >= 1 << PHASE_BITS)
565 phase = (1 << PHASE_BITS) - 1;
567 phase &= SCALER_PPF_IPHASE_MASK;
569 vc4_dlist_write(vc4_state,
571 VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
572 VC4_SET_FIELD(phase, SCALER_PPF_IPHASE));
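
	/* Note: scale above is the src/dst ratio kept in 16.16 fixed point
	 * (src is 16.16, dst is integer pixels), and phase is the initial
	 * sub-pixel offset, clamped and masked so it fits the IPHASE field.
	 */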
575 static u32 vc4_lbm_size(struct drm_plane_state *state)
577 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
578 struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
582 /* LBM is not needed when there's no vertical scaling. */
583 if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
584 vc4_state->y_scaling[1] == VC4_SCALING_NONE)
	/*
	 * This can be further optimized in the RGB/YUV444 case if the PPF
	 * decimation factor is between 0.5 and 1.0 by using crtc_w.
	 *
	 * It's not an issue though, since in that case src_w[0] is going
	 * to be greater than or equal to crtc_w.
	 */
594 if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ)
595 pix_per_line = vc4_state->crtc_w;
597 pix_per_line = vc4_state->src_w[0] >> 16;
599 if (!vc4_state->is_yuv) {
600 if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
601 lbm = pix_per_line * 8;
603 /* In special cases, this multiplier might be 12. */
604 lbm = pix_per_line * 16;
607 /* There are cases for this going down to a multiplier
608 * of 2, but according to the firmware source, the
609 * table in the docs is somewhat wrong.
611 lbm = pix_per_line * 16;
614 /* Align it to 64 or 128 (hvs5) bytes */
615 lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
617 /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
618 lbm /= vc4->is_vc5 ? 4 : 2;
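
	/* Worked example (non-VC5): a 1920-pixel-wide RGB source using the PPF
	 * vertical scaler needs 1920 * 16 = 30720 bytes, already a multiple of
	 * 64, which becomes 30720 / 2 = 15360 LBM words.
	 */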
623 static void vc4_write_scaling_parameters(struct drm_plane_state *state,
626 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
628 /* Ch0 H-PPF Word 0: Scaling Parameters */
629 if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
630 vc4_write_ppf(vc4_state,
631 vc4_state->src_w[channel], vc4_state->crtc_w, vc4_state->src_x, channel,
632 state->chroma_siting_h);
635 /* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
636 if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
637 vc4_write_ppf(vc4_state,
638 vc4_state->src_h[channel], vc4_state->crtc_h, vc4_state->src_y, channel,
639 state->chroma_siting_v);
640 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
643 /* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
644 if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
645 vc4_write_tpz(vc4_state,
646 vc4_state->src_w[channel], vc4_state->crtc_w);
649 /* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
650 if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
651 vc4_write_tpz(vc4_state,
652 vc4_state->src_h[channel], vc4_state->crtc_h);
653 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
657 static void vc4_plane_calc_load(struct drm_plane_state *state)
659 unsigned int hvs_load_shift, vrefresh, i;
660 struct drm_framebuffer *fb = state->fb;
661 struct vc4_plane_state *vc4_state;
662 struct drm_crtc_state *crtc_state;
663 unsigned int vscale_factor;
665 vc4_state = to_vc4_plane_state(state);
666 crtc_state = drm_atomic_get_existing_crtc_state(state->state,
668 vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);
	/* The HVS is able to process 2 pixels/cycle when scaling the source,
	 * 4 pixels/cycle otherwise.
	 * The alpha blending step seems to be pipelined and always operating
	 * at 4 pixels/cycle, so the limiting aspect here seems to be the
	 * scaler block.
	 * HVS load is expressed in clk-cycles/sec (AKA Hz).
	 */
677 if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
678 vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
679 vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
680 vc4_state->y_scaling[1] != VC4_SCALING_NONE)
685 vc4_state->membus_load = 0;
686 vc4_state->hvs_load = 0;
687 for (i = 0; i < fb->format->num_planes; i++) {
		/* Even if the bandwidth/plane required for a single frame is
		 *
		 * (vc4_state->src_w[i] >> 16) * (vc4_state->src_h[i] >> 16) *
		 * cpp * vrefresh,
		 *
		 * when downscaling, we have to read more pixels per line in
		 * the time frame reserved for a single line, so the bandwidth
		 * demand can be transiently higher. To account for that, we
		 * calculate the down-scaling factor and multiply the plane
		 * load by this number. We're likely over-estimating the read
		 * demand, but that's better than under-estimating it.
		 */
700 vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i] >> 16,
702 vc4_state->membus_load += (vc4_state->src_w[i] >> 16) *
703 (vc4_state->src_h[i] >> 16) *
704 vscale_factor * fb->format->cpp[i];
705 vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
708 vc4_state->hvs_load *= vrefresh;
709 vc4_state->hvs_load >>= hvs_load_shift;
710 vc4_state->membus_load *= vrefresh;
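
	/* At this point hvs_load is in HVS clock cycles per second (pixels per
	 * frame scaled by the 2- or 4-pixel/cycle throughput above) and
	 * membus_load is in bytes read from memory per second.
	 */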
713 static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
715 struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
716 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
717 unsigned long irqflags;
720 lbm_size = vc4_lbm_size(state);
724 if (WARN_ON(!vc4_state->lbm_offset))
727 /* Allocate the LBM memory that the HVS will use for temporary
728 * storage due to our scaling/format conversion.
730 if (!drm_mm_node_allocated(&vc4_state->lbm)) {
733 spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
734 ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
737 vc4->is_vc5 ? 64 : 32,
739 spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
744 WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
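
	/* Patch the dlist slot that vc4_plane_mode_set() reserved for the LBM
	 * base address with the offset that was just allocated.
	 */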
747 vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;
753 * The colorspace conversion matrices are held in 3 entries in the dlist.
754 * Create an array of them, with entries for each full and limited mode, and
755 * each supported colorspace.
757 static const u32 colorspace_coeffs[2][DRM_COLOR_ENCODING_MAX][3] = {
762 SCALER_CSC0_ITR_R_601_5,
763 SCALER_CSC1_ITR_R_601_5,
764 SCALER_CSC2_ITR_R_601_5,
767 SCALER_CSC0_ITR_R_709_3,
768 SCALER_CSC1_ITR_R_709_3,
769 SCALER_CSC2_ITR_R_709_3,
772 SCALER_CSC0_ITR_R_2020,
773 SCALER_CSC1_ITR_R_2020,
774 SCALER_CSC2_ITR_R_2020,
780 SCALER_CSC0_JPEG_JFIF,
781 SCALER_CSC1_JPEG_JFIF,
782 SCALER_CSC2_JPEG_JFIF,
785 SCALER_CSC0_ITR_R_709_3_FR,
786 SCALER_CSC1_ITR_R_709_3_FR,
787 SCALER_CSC2_ITR_R_709_3_FR,
790 SCALER_CSC0_ITR_R_2020_FR,
791 SCALER_CSC1_ITR_R_2020_FR,
792 SCALER_CSC2_ITR_R_2020_FR,
797 static u32 vc4_hvs4_get_alpha_blend_mode(struct drm_plane_state *state)
799 if (!state->fb->format->has_alpha)
800 return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
801 SCALER_POS2_ALPHA_MODE);
803 switch (state->pixel_blend_mode) {
804 case DRM_MODE_BLEND_PIXEL_NONE:
805 return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_FIXED,
806 SCALER_POS2_ALPHA_MODE);
808 case DRM_MODE_BLEND_PREMULTI:
809 return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
810 SCALER_POS2_ALPHA_MODE) |
811 SCALER_POS2_ALPHA_PREMULT;
812 case DRM_MODE_BLEND_COVERAGE:
813 return VC4_SET_FIELD(SCALER_POS2_ALPHA_MODE_PIPELINE,
814 SCALER_POS2_ALPHA_MODE);
818 static u32 vc4_hvs5_get_alpha_blend_mode(struct drm_plane_state *state)
820 if (!state->fb->format->has_alpha)
821 return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
822 SCALER5_CTL2_ALPHA_MODE);
824 switch (state->pixel_blend_mode) {
825 case DRM_MODE_BLEND_PIXEL_NONE:
826 return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_FIXED,
827 SCALER5_CTL2_ALPHA_MODE);
829 case DRM_MODE_BLEND_PREMULTI:
830 return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
831 SCALER5_CTL2_ALPHA_MODE) |
832 SCALER5_CTL2_ALPHA_PREMULT;
833 case DRM_MODE_BLEND_COVERAGE:
834 return VC4_SET_FIELD(SCALER5_CTL2_ALPHA_MODE_PIPELINE,
835 SCALER5_CTL2_ALPHA_MODE);
839 /* Writes out a full display list for an active plane to the plane's
840 * private dlist state.
842 static int vc4_plane_mode_set(struct drm_plane *plane,
843 struct drm_plane_state *state)
845 struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
846 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
847 struct drm_framebuffer *fb = state->fb;
848 u32 ctl0_offset = vc4_state->dlist_count;
849 const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
850 u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
851 int num_planes = fb->format->num_planes;
852 u32 h_subsample = fb->format->hsub;
853 u32 v_subsample = fb->format->vsub;
854 bool mix_plane_alpha;
856 u32 scl0, scl1, pitch0;
857 u32 tiling, src_x, src_y;
859 u32 hvs_format = format->hvs;
860 unsigned int rotation;
863 if (vc4_state->dlist_initialized)
866 ret = vc4_plane_setup_clipping_and_scaling(state);
870 width = vc4_state->src_w[0] >> 16;
871 height = vc4_state->src_h[0] >> 16;
873 /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
874 * and 4:4:4, scl1 should be set to scl0 so both channels of
875 * the scaler do the same thing. For YUV, the Y plane needs
876 * to be put in channel 1 and Cb/Cr in channel 0, so we swap
877 * the scl fields here.
879 if (num_planes == 1) {
880 scl0 = vc4_get_scl_field(state, 0);
883 scl0 = vc4_get_scl_field(state, 1);
884 scl1 = vc4_get_scl_field(state, 0);
887 rotation = drm_rotation_simplify(state->rotation,
892 /* We must point to the last line when Y reflection is enabled. */
893 src_y = vc4_state->src_y >> 16;
894 if (rotation & DRM_MODE_REFLECT_Y)
897 src_x = vc4_state->src_x >> 16;
899 switch (base_format_mod) {
900 case DRM_FORMAT_MOD_LINEAR:
901 tiling = SCALER_CTL0_TILING_LINEAR;
902 pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
		/* Adjust the base pointer to the first pixel to be scanned
		 * out.
		 */
907 for (i = 0; i < num_planes; i++) {
908 vc4_state->offsets[i] += src_y /
909 (i ? v_subsample : 1) *
912 vc4_state->offsets[i] += src_x /
913 (i ? h_subsample : 1) *
919 case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
920 u32 tile_size_shift = 12; /* T tiles are 4kb */
921 /* Whole-tile offsets, mostly for setting the pitch. */
922 u32 tile_w_shift = fb->format->cpp[0] == 2 ? 6 : 5;
923 u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
924 u32 tile_w_mask = (1 << tile_w_shift) - 1;
925 /* The height mask on 32-bit-per-pixel tiles is 63, i.e. twice
926 * the height (in pixels) of a 4k tile.
928 u32 tile_h_mask = (2 << tile_h_shift) - 1;
929 /* For T-tiled, the FB pitch is "how many bytes from one row to
930 * the next, such that
932 * pitch * tile_h == tile_size * tiles_per_row
934 u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
935 u32 tiles_l = src_x >> tile_w_shift;
936 u32 tiles_r = tiles_w - tiles_l;
937 u32 tiles_t = src_y >> tile_h_shift;
		/* Intra-tile offsets, which modify the base address (the
		 * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
		 * base address).
		 */
942 u32 tile_y = (src_y >> 4) & 1;
943 u32 subtile_y = (src_y >> 2) & 3;
944 u32 utile_y = src_y & 3;
945 u32 x_off = src_x & tile_w_mask;
946 u32 y_off = src_y & tile_h_mask;
		/* When Y reflection is requested we must set the
		 * SCALER_PITCH0_TILE_LINE_DIR flag to tell HVS that all lines
		 * after the initial one should be fetched in descending order,
		 * which makes sense since we start from the last line and go
		 * backward.
		 * Don't know why we need y_off = max_y_off - y_off, but it's
		 * definitely required (I guess it's also related to the "going
		 * backward" situation).
		 */
957 if (rotation & DRM_MODE_REFLECT_Y) {
958 y_off = tile_h_mask - y_off;
959 pitch0 = SCALER_PITCH0_TILE_LINE_DIR;
964 tiling = SCALER_CTL0_TILING_256B_OR_T;
965 pitch0 |= (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
966 VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
967 VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
968 VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
969 vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
970 vc4_state->offsets[0] += subtile_y << 8;
971 vc4_state->offsets[0] += utile_y << 4;
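
		/* The three adjustments above step the base address to the tile
		 * row containing src_y (tiles_t rows of 4 KiB tiles), then into
		 * the 256-byte sub-tile and 16-byte micro-tile row holding the
		 * first line to be scanned out.
		 */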
973 /* Rows of tiles alternate left-to-right and right-to-left. */
975 pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
976 vc4_state->offsets[0] += (tiles_w - tiles_l) <<
978 vc4_state->offsets[0] -= (1 + !tile_y) << 10;
980 vc4_state->offsets[0] += tiles_l << tile_size_shift;
981 vc4_state->offsets[0] += tile_y << 10;
987 case DRM_FORMAT_MOD_BROADCOM_SAND64:
988 case DRM_FORMAT_MOD_BROADCOM_SAND128:
989 case DRM_FORMAT_MOD_BROADCOM_SAND256: {
990 uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
992 if (param > SCALER_TILE_HEIGHT_MASK) {
993 DRM_DEBUG_KMS("SAND height too large (%d)\n",
998 if (fb->format->format == DRM_FORMAT_P030) {
999 hvs_format = HVS_PIXEL_FORMAT_YCBCR_10BIT;
1000 tiling = SCALER_CTL0_TILING_128B;
1002 hvs_format = HVS_PIXEL_FORMAT_H264;
1004 switch (base_format_mod) {
1005 case DRM_FORMAT_MOD_BROADCOM_SAND64:
1006 tiling = SCALER_CTL0_TILING_64B;
1008 case DRM_FORMAT_MOD_BROADCOM_SAND128:
1009 tiling = SCALER_CTL0_TILING_128B;
1011 case DRM_FORMAT_MOD_BROADCOM_SAND256:
1012 tiling = SCALER_CTL0_TILING_256B_OR_T;
		/* Adjust the base pointer to the first pixel to be scanned
		 * out.
		 *
		 * For P030, y_ptr [31:4] is the 128bit word for the start pixel
		 * y_ptr [3:0] is the pixel (0-11) contained within that 128bit
		 * word that should be taken as the first pixel.
		 * Ditto uv_ptr [31:4] vs [3:0], however [3:0] contains the
		 * element within the 128bit word, eg for pixel 3 the value
		 * should be 6.
		 */
1029 for (i = 0; i < num_planes; i++) {
1030 u32 tile_w, tile, x_off, pix_per_tile;
1032 if (fb->format->format == DRM_FORMAT_P030) {
1034 * Spec says: bits [31:4] of the given address
1035 * should point to the 128-bit word containing
1036 * the desired starting pixel, and bits[3:0]
1037 * should be between 0 and 11, indicating which
1038 * of the 12-pixels in that 128-bit word is the
1039 * first pixel to be used
1041 u32 remaining_pixels = src_x % 96;
1042 u32 aligned = remaining_pixels / 12;
1043 u32 last_bits = remaining_pixels % 12;
1045 x_off = aligned * 16 + last_bits;
1049 switch (base_format_mod) {
1050 case DRM_FORMAT_MOD_BROADCOM_SAND64:
1053 case DRM_FORMAT_MOD_BROADCOM_SAND128:
1056 case DRM_FORMAT_MOD_BROADCOM_SAND256:
1062 pix_per_tile = tile_w / fb->format->cpp[0];
1063 x_off = (src_x % pix_per_tile) /
1064 (i ? h_subsample : 1) *
1068 tile = src_x / pix_per_tile;
1070 vc4_state->offsets[i] += param * tile_w * tile;
1071 vc4_state->offsets[i] += src_y /
1072 (i ? v_subsample : 1) *
1074 vc4_state->offsets[i] += x_off & ~(i ? 1 : 0);
1077 pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
		DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%016llx",
1083 (long long)fb->modifier);
1087 /* fetch an extra pixel if we don't actually line up with the left edge. */
1088 if ((vc4_state->src_x & 0xffff) && vc4_state->src_x < (state->fb->width << 16))
1091 /* same for the right side */
1092 if (((vc4_state->src_x + vc4_state->src_w[0]) & 0xffff) &&
1093 vc4_state->src_x + vc4_state->src_w[0] < (state->fb->width << 16))
1096 /* now for the top */
1097 if ((vc4_state->src_y & 0xffff) && vc4_state->src_y < (state->fb->height << 16))
1100 /* and the bottom */
1101 if (((vc4_state->src_y + vc4_state->src_h[0]) & 0xffff) &&
1102 vc4_state->src_y + vc4_state->src_h[0] < (state->fb->height << 16))
1105 /* Don't waste cycles mixing with plane alpha if the set alpha
1106 * is opaque or there is no per-pixel alpha information.
1107 * In any case we use the alpha property value as the fixed alpha.
1109 mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
1110 fb->format->has_alpha;
1114 vc4_dlist_write(vc4_state,
1116 (rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) |
1117 (rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) |
1118 VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
1119 (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
1120 (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
1121 VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
1122 (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
1123 VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
1124 VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));
1126 /* Position Word 0: Image Positions and Alpha Value */
1127 vc4_state->pos0_offset = vc4_state->dlist_count;
1128 vc4_dlist_write(vc4_state,
1129 VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
1130 VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
1131 VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));
1133 /* Position Word 1: Scaled Image Dimensions. */
1134 if (!vc4_state->is_unity) {
1135 vc4_dlist_write(vc4_state,
1136 VC4_SET_FIELD(vc4_state->crtc_w,
1137 SCALER_POS1_SCL_WIDTH) |
1138 VC4_SET_FIELD(vc4_state->crtc_h,
1139 SCALER_POS1_SCL_HEIGHT));
1142 /* Position Word 2: Source Image Size, Alpha */
1143 vc4_state->pos2_offset = vc4_state->dlist_count;
1144 vc4_dlist_write(vc4_state,
1145 (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
1146 vc4_hvs4_get_alpha_blend_mode(state) |
1147 VC4_SET_FIELD(width, SCALER_POS2_WIDTH) |
1148 VC4_SET_FIELD(height, SCALER_POS2_HEIGHT));
1150 /* Position Word 3: Context. Written by the HVS. */
1151 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
1155 vc4_dlist_write(vc4_state,
1157 (format->pixel_order_hvs5 << SCALER_CTL0_ORDER_SHIFT) |
1158 (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
1159 VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
1160 (vc4_state->is_unity ?
1161 SCALER5_CTL0_UNITY : 0) |
1162 VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
1163 VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1) |
1164 SCALER5_CTL0_ALPHA_EXPAND |
1165 SCALER5_CTL0_RGB_EXPAND);
1167 /* Position Word 0: Image Positions and Alpha Value */
1168 vc4_state->pos0_offset = vc4_state->dlist_count;
1169 vc4_dlist_write(vc4_state,
1170 (rotation & DRM_MODE_REFLECT_Y ?
1171 SCALER5_POS0_VFLIP : 0) |
1172 VC4_SET_FIELD(vc4_state->crtc_x,
1173 SCALER_POS0_START_X) |
1174 (rotation & DRM_MODE_REFLECT_X ?
1175 SCALER5_POS0_HFLIP : 0) |
1176 VC4_SET_FIELD(vc4_state->crtc_y,
1177 SCALER5_POS0_START_Y)
1180 /* Control Word 2 */
1181 vc4_dlist_write(vc4_state,
1182 VC4_SET_FIELD(state->alpha >> 4,
1183 SCALER5_CTL2_ALPHA) |
1184 vc4_hvs5_get_alpha_blend_mode(state) |
1186 SCALER5_CTL2_ALPHA_MIX : 0)
1189 /* Position Word 1: Scaled Image Dimensions. */
1190 if (!vc4_state->is_unity) {
1191 vc4_dlist_write(vc4_state,
1192 VC4_SET_FIELD(vc4_state->crtc_w,
1193 SCALER5_POS1_SCL_WIDTH) |
1194 VC4_SET_FIELD(vc4_state->crtc_h,
1195 SCALER5_POS1_SCL_HEIGHT));
1198 /* Position Word 2: Source Image Size */
1199 vc4_state->pos2_offset = vc4_state->dlist_count;
1200 vc4_dlist_write(vc4_state,
1201 VC4_SET_FIELD(width, SCALER5_POS2_WIDTH) |
1202 VC4_SET_FIELD(height, SCALER5_POS2_HEIGHT));
1204 /* Position Word 3: Context. Written by the HVS. */
1205 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
1209 /* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
1211 * The pointers may be any byte address.
1213 vc4_state->ptr0_offset = vc4_state->dlist_count;
1214 for (i = 0; i < num_planes; i++)
1215 vc4_dlist_write(vc4_state, vc4_state->offsets[i]);
1217 /* Pointer Context Word 0/1/2: Written by the HVS */
1218 for (i = 0; i < num_planes; i++)
1219 vc4_dlist_write(vc4_state, 0xc0c0c0c0);
1222 vc4_dlist_write(vc4_state, pitch0);
1224 /* Pitch word 1/2 */
1225 for (i = 1; i < num_planes; i++) {
1226 if (hvs_format != HVS_PIXEL_FORMAT_H264 &&
1227 hvs_format != HVS_PIXEL_FORMAT_YCBCR_10BIT) {
1228 vc4_dlist_write(vc4_state,
1229 VC4_SET_FIELD(fb->pitches[i],
1232 vc4_dlist_write(vc4_state, pitch0);
1236 /* Colorspace conversion words */
1237 if (vc4_state->is_yuv) {
1238 enum drm_color_encoding color_encoding = state->color_encoding;
1239 enum drm_color_range color_range = state->color_range;
1242 if (color_encoding >= DRM_COLOR_ENCODING_MAX)
1243 color_encoding = DRM_COLOR_YCBCR_BT601;
1244 if (color_range >= DRM_COLOR_RANGE_MAX)
1245 color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
1247 ccm = colorspace_coeffs[color_range][color_encoding];
1249 vc4_dlist_write(vc4_state, ccm[0]);
1250 vc4_dlist_write(vc4_state, ccm[1]);
1251 vc4_dlist_write(vc4_state, ccm[2]);
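
		/* The three CSC dlist words come straight from
		 * colorspace_coeffs[], indexed by range and encoding; invalid
		 * property values fall back to BT.601 limited range above.
		 */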
1254 vc4_state->lbm_offset = 0;
1256 if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
1257 vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
1258 vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
1259 vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
1260 /* Reserve a slot for the LBM Base Address. The real value will
1261 * be set when calling vc4_plane_allocate_lbm().
1263 if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
1264 vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
1265 vc4_state->lbm_offset = vc4_state->dlist_count;
1266 vc4_dlist_counter_increment(vc4_state);
1269 if (num_planes > 1) {
			/* Emit Cb/Cr as channel 0 and Y as channel
			 * 1. This matches how we set up scl0/scl1
			 * above.
			 */
1274 vc4_write_scaling_parameters(state, 1);
1276 vc4_write_scaling_parameters(state, 0);
1278 /* If any PPF setup was done, then all the kernel
1279 * pointers get uploaded.
1281 if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
1282 vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
1283 vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
1284 vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
1285 u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
1286 SCALER_PPF_KERNEL_OFFSET);
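
			/* The same Mitchell/Netravali filter kernel offset is
			 * written for all four PPF channels (H and V for both
			 * scaler channels 0 and 1).
			 */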
1289 vc4_dlist_write(vc4_state, kernel);
1291 vc4_dlist_write(vc4_state, kernel);
1293 vc4_dlist_write(vc4_state, kernel);
1295 vc4_dlist_write(vc4_state, kernel);
1299 vc4_state->dlist[ctl0_offset] |=
1300 VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
1302 /* crtc_* are already clipped coordinates. */
1303 covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
1304 vc4_state->crtc_w == state->crtc->mode.hdisplay &&
1305 vc4_state->crtc_h == state->crtc->mode.vdisplay;
1306 /* Background fill might be necessary when the plane has per-pixel
1307 * alpha content or a non-opaque plane alpha and could blend from the
1308 * background or does not cover the entire screen.
1310 vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
1311 state->alpha != DRM_BLEND_ALPHA_OPAQUE;
	/* Flag the dlist as initialized to avoid checking it twice in case
	 * the async update check already called vc4_plane_mode_set() and
	 * decided to fall back to a sync update because an async update was
	 * not possible.
	 */
1318 vc4_state->dlist_initialized = 1;
1320 vc4_plane_calc_load(state);
1325 /* If a modeset involves changing the setup of a plane, the atomic
1326 * infrastructure will call this to validate a proposed plane setup.
1327 * However, if a plane isn't getting updated, this (and the
1328 * corresponding vc4_plane_atomic_update) won't get called. Thus, we
1329 * compute the dlist here and have all active plane dlists get updated
1330 * in the CRTC's flush.
1332 static int vc4_plane_atomic_check(struct drm_plane *plane,
1333 struct drm_atomic_state *state)
1335 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1337 struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state);
1340 vc4_state->dlist_count = 0;
1342 if (!plane_enabled(new_plane_state))
1345 ret = vc4_plane_mode_set(plane, new_plane_state);
1349 return vc4_plane_allocate_lbm(new_plane_state);
1352 static void vc4_plane_atomic_update(struct drm_plane *plane,
1353 struct drm_atomic_state *state)
	/* No contents here. Since we don't know where in the CRTC's
	 * dlist we should be stored, our dlist is uploaded to the
	 * hardware with vc4_plane_write_dlist() at CRTC atomic_flush
	 * time.
	 */
1362 u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
1364 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
1367 vc4_state->hw_dlist = dlist;
1369 /* Can't memcpy_toio() because it needs to be 32-bit writes. */
1370 for (i = 0; i < vc4_state->dlist_count; i++)
1371 writel(vc4_state->dlist[i], &dlist[i]);
1373 return vc4_state->dlist_count;
1376 u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
1378 const struct vc4_plane_state *vc4_state =
1379 container_of(state, typeof(*vc4_state), base);
1381 return vc4_state->dlist_count;
/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from a new framebuffer.
 */
1387 void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
1389 struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
1390 struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
1393 /* We're skipping the address adjustment for negative origin,
1394 * because this is only called on the primary plane.
1396 WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
1397 addr = bo->paddr + fb->offsets[0];
1399 /* Write the new address into the hardware immediately. The
1400 * scanout will start from this address as soon as the FIFO
1401 * needs to refill with pixels.
1403 writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
1405 /* Also update the CPU-side dlist copy, so that any later
1406 * atomic updates that don't do a new modeset on our plane
1407 * also use our updated address.
1409 vc4_state->dlist[vc4_state->ptr0_offset] = addr;
1412 static void vc4_plane_atomic_async_update(struct drm_plane *plane,
1413 struct drm_atomic_state *state)
1415 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1417 struct vc4_plane_state *vc4_state, *new_vc4_state;
1419 swap(plane->state->fb, new_plane_state->fb);
1420 plane->state->crtc_x = new_plane_state->crtc_x;
1421 plane->state->crtc_y = new_plane_state->crtc_y;
1422 plane->state->crtc_w = new_plane_state->crtc_w;
1423 plane->state->crtc_h = new_plane_state->crtc_h;
1424 plane->state->src_x = new_plane_state->src_x;
1425 plane->state->src_y = new_plane_state->src_y;
1426 plane->state->src_w = new_plane_state->src_w;
1427 plane->state->src_h = new_plane_state->src_h;
1428 plane->state->alpha = new_plane_state->alpha;
1429 plane->state->pixel_blend_mode = new_plane_state->pixel_blend_mode;
1430 plane->state->rotation = new_plane_state->rotation;
1431 plane->state->zpos = new_plane_state->zpos;
1432 plane->state->normalized_zpos = new_plane_state->normalized_zpos;
1433 plane->state->color_encoding = new_plane_state->color_encoding;
1434 plane->state->color_range = new_plane_state->color_range;
1435 plane->state->src = new_plane_state->src;
1436 plane->state->dst = new_plane_state->dst;
1437 plane->state->visible = new_plane_state->visible;
1439 new_vc4_state = to_vc4_plane_state(new_plane_state);
1440 vc4_state = to_vc4_plane_state(plane->state);
1442 vc4_state->crtc_x = new_vc4_state->crtc_x;
1443 vc4_state->crtc_y = new_vc4_state->crtc_y;
1444 vc4_state->crtc_h = new_vc4_state->crtc_h;
1445 vc4_state->crtc_w = new_vc4_state->crtc_w;
1446 vc4_state->src_x = new_vc4_state->src_x;
1447 vc4_state->src_y = new_vc4_state->src_y;
1448 memcpy(vc4_state->src_w, new_vc4_state->src_w,
1449 sizeof(vc4_state->src_w));
1450 memcpy(vc4_state->src_h, new_vc4_state->src_h,
1451 sizeof(vc4_state->src_h));
1452 memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
1453 sizeof(vc4_state->x_scaling));
1454 memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
1455 sizeof(vc4_state->y_scaling));
1456 vc4_state->is_unity = new_vc4_state->is_unity;
1457 vc4_state->is_yuv = new_vc4_state->is_yuv;
1458 memcpy(vc4_state->offsets, new_vc4_state->offsets,
1459 sizeof(vc4_state->offsets));
1460 vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;
1462 /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
1463 vc4_state->dlist[vc4_state->pos0_offset] =
1464 new_vc4_state->dlist[vc4_state->pos0_offset];
1465 vc4_state->dlist[vc4_state->pos2_offset] =
1466 new_vc4_state->dlist[vc4_state->pos2_offset];
1467 vc4_state->dlist[vc4_state->ptr0_offset] =
1468 new_vc4_state->dlist[vc4_state->ptr0_offset];
	/* Note that we can't just call vc4_plane_write_dlist()
	 * because that would smash the context data that the HVS is
	 * maintaining.
	 */
1474 writel(vc4_state->dlist[vc4_state->pos0_offset],
1475 &vc4_state->hw_dlist[vc4_state->pos0_offset]);
1476 writel(vc4_state->dlist[vc4_state->pos2_offset],
1477 &vc4_state->hw_dlist[vc4_state->pos2_offset]);
1478 writel(vc4_state->dlist[vc4_state->ptr0_offset],
1479 &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
1482 static int vc4_plane_atomic_async_check(struct drm_plane *plane,
1483 struct drm_atomic_state *state)
1485 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1487 struct vc4_plane_state *old_vc4_state, *new_vc4_state;
1491 ret = vc4_plane_mode_set(plane, new_plane_state);
1495 old_vc4_state = to_vc4_plane_state(plane->state);
1496 new_vc4_state = to_vc4_plane_state(new_plane_state);
1498 if (!new_vc4_state->hw_dlist)
1501 if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
1502 old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
1503 old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
1504 old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
1505 vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state))
	/* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update.
	 * If anything else has changed, fall back to a sync update.
	 */
1511 for (i = 0; i < new_vc4_state->dlist_count; i++) {
1512 if (i == new_vc4_state->pos0_offset ||
1513 i == new_vc4_state->pos2_offset ||
1514 i == new_vc4_state->ptr0_offset ||
1515 (new_vc4_state->lbm_offset &&
1516 i == new_vc4_state->lbm_offset))
1519 if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
1526 static int vc4_prepare_fb(struct drm_plane *plane,
1527 struct drm_plane_state *state)
1535 bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
1537 drm_gem_plane_helper_prepare_fb(plane, state);
1539 if (plane->state->fb == state->fb)
1542 ret = vc4_bo_inc_usecnt(bo);
1549 static void vc4_cleanup_fb(struct drm_plane *plane,
1550 struct drm_plane_state *state)
1554 if (plane->state->fb == state->fb || !state->fb)
1557 bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
1558 vc4_bo_dec_usecnt(bo);
1561 static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
1562 .atomic_check = vc4_plane_atomic_check,
1563 .atomic_update = vc4_plane_atomic_update,
1564 .prepare_fb = vc4_prepare_fb,
1565 .cleanup_fb = vc4_cleanup_fb,
1566 .atomic_async_check = vc4_plane_atomic_async_check,
1567 .atomic_async_update = vc4_plane_atomic_async_update,
1570 static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = {
1571 .atomic_check = vc4_plane_atomic_check,
1572 .atomic_update = vc4_plane_atomic_update,
1573 .atomic_async_check = vc4_plane_atomic_async_check,
1574 .atomic_async_update = vc4_plane_atomic_async_update,
1577 static bool vc4_format_mod_supported(struct drm_plane *plane,
1581 /* Support T_TILING for RGB formats only. */
1583 case DRM_FORMAT_XRGB8888:
1584 case DRM_FORMAT_ARGB8888:
1585 case DRM_FORMAT_ABGR8888:
1586 case DRM_FORMAT_XBGR8888:
1587 case DRM_FORMAT_RGB565:
1588 case DRM_FORMAT_BGR565:
1589 case DRM_FORMAT_ARGB1555:
1590 case DRM_FORMAT_XRGB1555:
1591 switch (fourcc_mod_broadcom_mod(modifier)) {
1592 case DRM_FORMAT_MOD_LINEAR:
1593 case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
1598 case DRM_FORMAT_NV12:
1599 case DRM_FORMAT_NV21:
1600 switch (fourcc_mod_broadcom_mod(modifier)) {
1601 case DRM_FORMAT_MOD_LINEAR:
1602 case DRM_FORMAT_MOD_BROADCOM_SAND64:
1603 case DRM_FORMAT_MOD_BROADCOM_SAND128:
1604 case DRM_FORMAT_MOD_BROADCOM_SAND256:
1609 case DRM_FORMAT_P030:
1610 switch (fourcc_mod_broadcom_mod(modifier)) {
1611 case DRM_FORMAT_MOD_BROADCOM_SAND128:
1616 case DRM_FORMAT_RGBX1010102:
1617 case DRM_FORMAT_BGRX1010102:
1618 case DRM_FORMAT_RGBA1010102:
1619 case DRM_FORMAT_BGRA1010102:
1620 case DRM_FORMAT_XRGB4444:
1621 case DRM_FORMAT_ARGB4444:
1622 case DRM_FORMAT_XBGR4444:
1623 case DRM_FORMAT_ABGR4444:
1624 case DRM_FORMAT_RGBX4444:
1625 case DRM_FORMAT_RGBA4444:
1626 case DRM_FORMAT_BGRX4444:
1627 case DRM_FORMAT_BGRA4444:
1628 case DRM_FORMAT_RGB332:
1629 case DRM_FORMAT_BGR233:
1630 case DRM_FORMAT_YUV422:
1631 case DRM_FORMAT_YVU422:
1632 case DRM_FORMAT_YUV420:
1633 case DRM_FORMAT_YVU420:
1634 case DRM_FORMAT_NV16:
1635 case DRM_FORMAT_NV61:
1637 return (modifier == DRM_FORMAT_MOD_LINEAR);
1641 static const struct drm_plane_funcs vc4_plane_funcs = {
1642 .update_plane = drm_atomic_helper_update_plane,
1643 .disable_plane = drm_atomic_helper_disable_plane,
1644 .reset = vc4_plane_reset,
1645 .atomic_duplicate_state = vc4_plane_duplicate_state,
1646 .atomic_destroy_state = vc4_plane_destroy_state,
1647 .format_mod_supported = vc4_format_mod_supported,
1650 struct drm_plane *vc4_plane_init(struct drm_device *dev,
1651 enum drm_plane_type type,
1652 uint32_t possible_crtcs)
1654 struct vc4_dev *vc4 = to_vc4_dev(dev);
1655 struct drm_plane *plane;
1656 struct vc4_plane *vc4_plane;
1657 u32 formats[ARRAY_SIZE(hvs_formats)];
1658 int num_formats = 0;
1660 static const uint64_t modifiers[] = {
1661 DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
1662 DRM_FORMAT_MOD_BROADCOM_SAND128,
1663 DRM_FORMAT_MOD_BROADCOM_SAND64,
1664 DRM_FORMAT_MOD_BROADCOM_SAND256,
1665 DRM_FORMAT_MOD_LINEAR,
1666 DRM_FORMAT_MOD_INVALID
1669 for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
1670 if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
1671 formats[num_formats] = hvs_formats[i].drm;
1676 vc4_plane = drmm_universal_plane_alloc(dev, struct vc4_plane, base,
1679 formats, num_formats,
1680 modifiers, type, NULL);
1681 if (IS_ERR(vc4_plane))
1682 return ERR_CAST(vc4_plane);
1683 plane = &vc4_plane->base;
1686 drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
1688 drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
1690 drm_plane_create_alpha_property(plane);
1691 drm_plane_create_blend_mode_property(plane,
1692 BIT(DRM_MODE_BLEND_PIXEL_NONE) |
1693 BIT(DRM_MODE_BLEND_PREMULTI) |
1694 BIT(DRM_MODE_BLEND_COVERAGE));
1695 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
1697 DRM_MODE_ROTATE_180 |
1698 DRM_MODE_REFLECT_X |
1699 DRM_MODE_REFLECT_Y);
1701 drm_plane_create_color_properties(plane,
1702 BIT(DRM_COLOR_YCBCR_BT601) |
1703 BIT(DRM_COLOR_YCBCR_BT709) |
1704 BIT(DRM_COLOR_YCBCR_BT2020),
1705 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
1706 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
1707 DRM_COLOR_YCBCR_BT709,
1708 DRM_COLOR_YCBCR_LIMITED_RANGE);
1710 drm_plane_create_chroma_siting_properties(plane, 0, 0);
1712 if (type == DRM_PLANE_TYPE_PRIMARY)
1713 drm_plane_create_zpos_immutable_property(plane, 0);
1718 #define VC4_NUM_OVERLAY_PLANES 16
1720 int vc4_plane_create_additional_planes(struct drm_device *drm)
1722 struct drm_plane *cursor_plane;
1723 struct drm_crtc *crtc;
	/* Set up some arbitrary number of planes. We're not limited
	 * by a set number of physical registers, just the space in
	 * the HVS (16k) and how small a plane can be (28 bytes).
	 * However, each plane we set up takes up some memory, and
	 * increases the cost of looping over planes, which atomic
	 * modesetting does quite a bit. As a result, we pick a
	 * modest number of planes to expose, that should hopefully
	 * still cover any sane use case.
	 */
1735 for (i = 0; i < VC4_NUM_OVERLAY_PLANES; i++) {
1736 struct drm_plane *plane =
1737 vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
1738 GENMASK(drm->mode_config.num_crtc - 1, 0));
		/* Create zpos property. Max of all the overlays + 1 primary +
		 * 1 cursor plane on a crtc.
		 */
1746 drm_plane_create_zpos_property(plane, i + 1, 1,
1747 VC4_NUM_OVERLAY_PLANES + 1);
1750 drm_for_each_crtc(crtc, drm) {
		/* Set up the legacy cursor after overlay initialization,
		 * since the zpos fallback is that planes are rendered by plane
		 * ID order, and that then puts the cursor on top.
		 */
1755 cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
1756 drm_crtc_mask(crtc));
1757 if (!IS_ERR(cursor_plane)) {
1758 crtc->cursor = cursor_plane;
1760 drm_plane_create_zpos_property(cursor_plane,
1761 VC4_NUM_OVERLAY_PLANES + 1,
1763 VC4_NUM_OVERLAY_PLANES + 1);