// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 HVS module.
 *
 * The Hardware Video Scaler (HVS) is the piece of hardware that does
 * translation, scaling, colorspace conversion, and compositing of
 * pixels stored in framebuffers into a FIFO of pixels going out to
 * the Pixel Valve (CRTC). It operates at the system clock rate (the
 * system audio clock gate, specifically), which is much higher than
 * the pixel clock rate.
 *
 * There is a single global HVS, with multiple output FIFOs that can
 * be consumed by the PVs. This file just manages the resources for
 * the HVS, while the vc4_crtc.c code actually drives HVS setup for
 * each CRTC.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct debugfs_reg32 hvs_regs[] = {
	VC4_REG32(SCALER_DISPCTRL),
	VC4_REG32(SCALER_DISPSTAT),
	VC4_REG32(SCALER_DISPID),
	VC4_REG32(SCALER_DISPECTRL),
	VC4_REG32(SCALER_DISPPROF),
	VC4_REG32(SCALER_DISPDITHER),
	VC4_REG32(SCALER_DISPEOLN),
	VC4_REG32(SCALER_DISPLIST0),
	VC4_REG32(SCALER_DISPLIST1),
	VC4_REG32(SCALER_DISPLIST2),
	VC4_REG32(SCALER_DISPLSTAT),
	VC4_REG32(SCALER_DISPLACT0),
	VC4_REG32(SCALER_DISPLACT1),
	VC4_REG32(SCALER_DISPLACT2),
	VC4_REG32(SCALER_DISPCTRL0),
	VC4_REG32(SCALER_DISPBKGND0),
	VC4_REG32(SCALER_DISPSTAT0),
	VC4_REG32(SCALER_DISPBASE0),
	VC4_REG32(SCALER_DISPCTRL1),
	VC4_REG32(SCALER_DISPBKGND1),
	VC4_REG32(SCALER_DISPSTAT1),
	VC4_REG32(SCALER_DISPBASE1),
	VC4_REG32(SCALER_DISPCTRL2),
	VC4_REG32(SCALER_DISPBKGND2),
	VC4_REG32(SCALER_DISPSTAT2),
	VC4_REG32(SCALER_DISPBASE2),
	VC4_REG32(SCALER_DISPALPHA2),
	VC4_REG32(SCALER_OLEDOFFS),
	VC4_REG32(SCALER_OLEDCOEF0),
	VC4_REG32(SCALER_OLEDCOEF1),
	VC4_REG32(SCALER_OLEDCOEF2),
};

void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
	struct drm_device *drm = &hvs->vc4->base;
	struct drm_printer p = drm_info_printer(&hvs->pdev->dev);
	int idx, i;

	drm_print_regset32(&p, &hvs->regset);

	if (!drm_dev_enter(drm, &idx))
		return;

	DRM_INFO("HVS ctx:\n");
	for (i = 0; i < 64; i += 4) {
		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
			 readl((u32 __iomem *)hvs->dlist + i + 0),
			 readl((u32 __iomem *)hvs->dlist + i + 1),
			 readl((u32 __iomem *)hvs->dlist + i + 2),
			 readl((u32 __iomem *)hvs->dlist + i + 3));
	}

	drm_dev_exit(idx);
}

static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));

	return 0;
}

static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_printer p = drm_seq_file_printer(m);
	unsigned int next_entry_start = 0;
	unsigned int i, j;
	u32 dlist_word, dispstat;

	for (i = 0; i < SCALER_CHANNELS_COUNT; i++) {
		dispstat = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(i)),
					 SCALER_DISPSTATX_MODE);
		if (dispstat == SCALER_DISPSTATX_MODE_DISABLED ||
		    dispstat == SCALER_DISPSTATX_MODE_EOF) {
			drm_printf(&p, "HVS chan %u disabled\n", i);
			continue;
		}

		drm_printf(&p, "HVS chan %u:\n", i);

		for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) {
			dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j);
			drm_printf(&p, "dlist: %02d: 0x%08x\n", j,
				   dlist_word);
			if (!next_entry_start ||
			    next_entry_start == j) {
				if (dlist_word & SCALER_CTL0_END)
					break;
				next_entry_start = j +
					VC4_GET_FIELD(dlist_word,
						      SCALER_CTL0_SIZE);
			}
		}
	}

	return 0;
}

static int vc5_hvs_debugfs_gamma(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_printer p = drm_seq_file_printer(m);
	unsigned int i, chan;
	u32 dispstat, dispbkgndx;

	for (chan = 0; chan < SCALER_CHANNELS_COUNT; chan++) {
		u32 x_c, grad;
		u32 offset = SCALER5_DSPGAMMA_START +
			chan * SCALER5_DSPGAMMA_CHAN_OFFSET;

		dispstat = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
					 SCALER_DISPSTATX_MODE);
		if (dispstat == SCALER_DISPSTATX_MODE_DISABLED ||
		    dispstat == SCALER_DISPSTATX_MODE_EOF) {
			drm_printf(&p, "HVS channel %u: Channel disabled\n", chan);
			continue;
		}

		dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
		if (!(dispbkgndx & SCALER_DISPBKGND_GAMMA)) {
			drm_printf(&p, "HVS channel %u: Gamma disabled\n", chan);
			continue;
		}

		drm_printf(&p, "HVS channel %u:\n", chan);

		drm_printf(&p, "  red:\n");
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8) {
			x_c = HVS_READ(offset);
			grad = HVS_READ(offset + 4);
			drm_printf(&p, "  %08x %08x - x %u, c %u, grad %u\n",
				   x_c, grad,
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_X),
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_C),
				   grad);
		}

		drm_printf(&p, "  green:\n");
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8) {
			x_c = HVS_READ(offset);
			grad = HVS_READ(offset + 4);
			drm_printf(&p, "  %08x %08x - x %u, c %u, grad %u\n",
				   x_c, grad,
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_X),
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_C),
				   grad);
		}

		drm_printf(&p, "  blue:\n");
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8) {
			x_c = HVS_READ(offset);
			grad = HVS_READ(offset + 4);
			drm_printf(&p, "  %08x %08x - x %u, c %u, grad %u\n",
				   x_c, grad,
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_X),
				   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_C),
				   grad);
		}

		/* Alpha only valid on channel 2 */
		if (chan == 2) {
			drm_printf(&p, "  alpha:\n");
			for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8) {
				x_c = HVS_READ(offset);
				grad = HVS_READ(offset + 4);
				drm_printf(&p, "  %08x %08x - x %u, c %u, grad %u\n",
					   x_c, grad,
					   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_X),
					   VC4_GET_FIELD(x_c, SCALER5_DSPGAMMA_OFF_C),
					   grad);
			}
		}
	}

	return 0;
}

/* The filter kernel is composed of dwords each containing 3 9-bit
 * signed integers packed next to each other.
 */
#define VC4_INT_TO_COEFF(coeff) (coeff & 0x1ff)
#define VC4_PPF_FILTER_WORD(c0, c1, c2)				\
	((((c0) & 0x1ff) << 0) |				\
	 (((c1) & 0x1ff) << 9) |				\
	 (((c2) & 0x1ff) << 18))
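
/* For example, packing the signed coefficients -6, 50 and 227:
 *   VC4_PPF_FILTER_WORD(-6, 50, 227) = (0x1fa << 0) | (0x032 << 9) | (0x0e3 << 18)
 *                                    = 0x038c65fa
 * since -6 wraps to 0x1fa as a 9-bit two's-complement value.
 */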

/* The whole filter kernel is arranged as the coefficients 0-16 going
 * up, then a pad, then 17-31 going down and reversed within the
 * dwords. This means that a linear phase kernel (where it's
 * symmetrical at the boundary between 15 and 16) has the last 5
 * dwords matching the first 5, but reversed.
 */
#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8,	\
				c9, c10, c11, c12, c13, c14, c15)	\
	{VC4_PPF_FILTER_WORD(c0, c1, c2),				\
	 VC4_PPF_FILTER_WORD(c3, c4, c5),				\
	 VC4_PPF_FILTER_WORD(c6, c7, c8),				\
	 VC4_PPF_FILTER_WORD(c9, c10, c11),				\
	 VC4_PPF_FILTER_WORD(c12, c13, c14),				\
	 VC4_PPF_FILTER_WORD(c15, c15, 0)}

#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6
#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)
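
/* A linear phase kernel therefore occupies VC4_KERNEL_DWORDS
 * (6 * 2 - 1 = 11) words of dlist RAM: the 6 words above uploaded in
 * order, followed by words 4 down to 0 again, which is what
 * vc4_hvs_upload_linear_kernel() below implements.
 */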

/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali.
 * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
 */
static const u32 mitchell_netravali_1_3_1_3_kernel[] =
	VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18,
				50, 82, 119, 155, 187, 213, 227);

static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
					struct drm_mm_node *space,
					const u32 *kernel)
{
	int ret, i;
	u32 __iomem *dst_kernel;

	/*
	 * NOTE: We don't need a call to drm_dev_enter()/drm_dev_exit()
	 * here since that function is only called from vc4_hvs_bind().
	 */

	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
	if (ret) {
		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
			  ret);
		return ret;
	}

	dst_kernel = hvs->dlist + space->start;

	for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
		if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS)
			writel(kernel[i], &dst_kernel[i]);
		else
			writel(kernel[VC4_KERNEL_DWORDS - i - 1],
			       &dst_kernel[i]);
	}

	return 0;
}

static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
			     struct vc4_crtc *vc4_crtc)
{
	struct drm_device *drm = &hvs->vc4->base;
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	int idx;
	u32 i;

	if (!drm_dev_enter(drm, &idx))
		return;

	/* The LUT memory is laid out with each HVS channel in order,
	 * each of which takes 256 writes for R, 256 for G, then 256
	 * for B.
	 */
	HVS_WRITE(SCALER_GAMADDR,
		  SCALER_GAMADDR_AUTOINC |
		  (vc4_state->assigned_channel * 3 * crtc->gamma_size));
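
	/* With the 256-entry tables used here, channel 1's entries thus
	 * start at word 1 * 3 * 256 = 768: R in 768-1023, G in
	 * 1024-1279, B in 1280-1535.
	 */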

	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);

	drm_dev_exit(idx);
}

static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
				     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc_state *crtc_state = vc4_crtc->base.state;
	struct drm_color_lut *lut = crtc_state->gamma_lut->data;
	u32 length = drm_color_lut_size(crtc_state->gamma_lut);
	u32 i;

	for (i = 0; i < length; i++) {
		vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);
		vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8);
		vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8);
	}

	vc4_hvs_lut_load(hvs, vc4_crtc);
}

static void vc5_hvs_write_gamma_entry(struct vc4_hvs *hvs,
				      u32 offset,
				      struct vc5_gamma_entry *gamma)
{
	HVS_WRITE(offset, gamma->x_c_terms);
	HVS_WRITE(offset + 4, gamma->grad_term);
}

static void vc5_hvs_lut_load(struct vc4_hvs *hvs,
			     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc_state *crtc_state = vc4_crtc->base.state;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
	u32 i;
	u32 offset = SCALER5_DSPGAMMA_START +
		vc4_state->assigned_channel * SCALER5_DSPGAMMA_CHAN_OFFSET;

	for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8)
		vc5_hvs_write_gamma_entry(hvs, offset, &vc4_crtc->pwl_r[i]);
	for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8)
		vc5_hvs_write_gamma_entry(hvs, offset, &vc4_crtc->pwl_g[i]);
	for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8)
		vc5_hvs_write_gamma_entry(hvs, offset, &vc4_crtc->pwl_b[i]);

	if (vc4_state->assigned_channel == 2) {
		/* Alpha only valid on channel 2 */
		for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++, offset += 8)
			vc5_hvs_write_gamma_entry(hvs, offset, &vc4_crtc->pwl_a[i]);
	}
}

static void vc5_hvs_update_gamma_lut(struct vc4_hvs *hvs,
				     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct drm_color_lut *lut = crtc->state->gamma_lut->data;
	unsigned int step, i;
	u32 start, end;

#define VC5_HVS_UPDATE_GAMMA_ENTRY_FROM_LUT(pwl, chan)			\
	start = drm_color_lut_extract(lut[i * step].chan, 12);		\
	end = drm_color_lut_extract(lut[(i + 1) * step - 1].chan, 12);	\
									\
	/* Negative gradients not permitted by the hardware, so		\
	 * flatten such points out.					\
	 */								\
	if (end < start)						\
		end = start;						\
									\
	/* Assume 12bit pipeline.					\
	 * X evenly spread over full range (12 bit).			\
	 * C as U12.4 format.						\
	 * Gradient as U4.8 format.					\
	 */								\
	vc4_crtc->pwl[i] =						\
		VC5_HVS_SET_GAMMA_ENTRY(i << 8, start << 4,		\
					((end - start) << 4) / (step - 1))
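
/* As an illustration, with a 256-entry LUT (step == 16) PWL point i
 * condenses LUT entries [16 * i, 16 * i + 15] into one segment whose
 * X ordinate is i * 256 across the 4096-wide (12-bit) range.
 */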

	/* HVS5 has a 16 point piecewise linear function for each colour
	 * channel (including alpha on channel 2) on each display channel.
	 *
	 * Currently take a crude subsample of the gamma LUT, but this could
	 * be improved to implement curve fitting.
	 */
	step = crtc->gamma_size / SCALER5_DSPGAMMA_NUM_POINTS;
	for (i = 0; i < SCALER5_DSPGAMMA_NUM_POINTS; i++) {
		VC5_HVS_UPDATE_GAMMA_ENTRY_FROM_LUT(pwl_r, red);
		VC5_HVS_UPDATE_GAMMA_ENTRY_FROM_LUT(pwl_g, green);
		VC5_HVS_UPDATE_GAMMA_ENTRY_FROM_LUT(pwl_b, blue);
	}

	vc5_hvs_lut_load(hvs, vc4_crtc);
}

u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
	struct drm_device *drm = &hvs->vc4->base;
	u8 field = 0;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return 0;

	switch (fifo) {
	case 0:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT0);
		break;
	case 1:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT1);
		break;
	case 2:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
				      SCALER_DISPSTAT2_FRCNT2);
		break;
	}

	drm_dev_exit(idx);
	return field;
}

int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 reg;
	int ret;

	if (!vc4->is_vc5)
		return output;

	/*
	 * NOTE: We should probably use drm_dev_enter()/drm_dev_exit()
	 * here, but this function is only used during the DRM device
	 * initialization, so we should be fine.
	 */

	switch (output) {
	case 0:
		return 0;

	case 1:
		return 1;

	case 2:
		reg = HVS_READ(SCALER_DISPECTRL);
		ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg);
		if (ret == 0)
			return 2;

		return 0;

	case 3:
		reg = HVS_READ(SCALER_DISPCTRL);
		ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	case 4:
		reg = HVS_READ(SCALER_DISPEOLN);
		ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	case 5:
		reg = HVS_READ(SCALER_DISPDITHER);
		ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	default:
		return -EPIPE;
	}
}

static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
				struct drm_display_mode *mode, bool oneshot)
{
	struct vc4_dev *vc4 = hvs->vc4;
	struct drm_device *drm = &vc4->base;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
	unsigned int chan = vc4_crtc_state->assigned_channel;
	bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
	u32 dispbkgndx;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return -ENODEV;

	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
	HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);

	/* Turn on the scaler, which will wait for vstart to start
	 * compositing.
	 * When feeding the transposer, we should operate in oneshot
	 * mode.
	 */
	dispctrl = SCALER_DISPCTRLX_ENABLE;
	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));

	if (!vc4->is_vc5) {
		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
					  SCALER_DISPCTRLX_WIDTH) |
			    VC4_SET_FIELD(mode->vdisplay,
					  SCALER_DISPCTRLX_HEIGHT) |
			    (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
		dispbkgndx |= SCALER_DISPBKGND_AUTOHS;
	} else {
		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
					  SCALER5_DISPCTRLX_WIDTH) |
			    VC4_SET_FIELD(mode->vdisplay,
					  SCALER5_DISPCTRLX_HEIGHT) |
			    (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);
		dispbkgndx &= ~SCALER5_DISPBKGND_BCK2BCK;
	}

	HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);

	dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
	dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;

	if (crtc->state->gamma_lut)
		/* Enable gamma if required */
		dispbkgndx |= SCALER_DISPBKGND_GAMMA;

	HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));

	/* Reload the LUT, since the SRAMs would have been disabled if
	 * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
	 */
	if (!vc4->is_vc5)
		vc4_hvs_lut_load(hvs, vc4_crtc);
	else
		vc5_hvs_lut_load(hvs, vc4_crtc);

	drm_dev_exit(idx);

	return 0;
}

void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
{
	struct drm_device *drm = &hvs->vc4->base;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE))
		goto out;

	HVS_WRITE(SCALER_DISPCTRLX(chan),
		  HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan),
		  HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE);

	/* Once we leave, the scaler should be disabled and its fifo empty. */
	WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);

	WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
				   SCALER_DISPSTATX_MODE) !=
		     SCALER_DISPSTATX_MODE_DISABLED);

	WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
		      (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
		     SCALER_DISPSTATX_EMPTY);

out:
	drm_dev_exit(idx);
}

static int vc4_hvs_gamma_check(struct drm_crtc *crtc,
			       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->is_vc5)
		return 0;

	if (!crtc_state->color_mgmt_changed)
		return 0;

	if (crtc_state->gamma_lut) {
		unsigned int len = drm_color_lut_size(crtc_state->gamma_lut);

		if (len != crtc->gamma_size) {
			DRM_DEBUG_KMS("Invalid LUT size; got %u, expected %u\n",
				      len, crtc->gamma_size);
			return -EINVAL;
		}
	}

	connector = vc4_get_crtc_connector(crtc, crtc_state);
	if (!connector)
		return -EINVAL;

	if (connector->connector_type != DRM_MODE_CONNECTOR_HDMIA)
		return 0;

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (!conn_state)
		return -EINVAL;

	crtc_state->mode_changed = true;
	return 0;
}

int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_plane *plane;
	unsigned long flags;
	const struct drm_plane_state *plane_state;
	u32 dlist_count = 0;
	int ret;

	/* The pixelvalve can only feed one encoder (and encoders are
	 * 1:1 with connectors.)
	 */
	if (hweight32(crtc_state->connector_mask) > 1)
		return -EINVAL;

	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)
		dlist_count += vc4_plane_dlist_size(plane_state);

	dlist_count++; /* Account for SCALER_CTL0_END. */

	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
				 dlist_count);
	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
	if (ret)
		return ret;

	return vc4_hvs_gamma_check(crtc, state);
}

static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return;

	HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
		  vc4_state->mm.start);

	drm_dev_exit(idx);
}

static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	if (crtc->state->event) {
		crtc->state->event->pipe = drm_crtc_index(crtc);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);

		if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) {
			vc4_crtc->event = crtc->state->event;
			crtc->state->event = NULL;
		}

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_dlist = vc4_state->mm.start;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

void vc4_hvs_atomic_begin(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_hvs_channel = vc4_state->assigned_channel;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	bool oneshot = vc4_crtc->feeds_txp;

	vc4_hvs_install_dlist(crtc);
	vc4_hvs_update_dlist(crtc);
	vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
}

void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
	unsigned int chan = vc4_state->assigned_channel;

	vc4_hvs_stop_channel(vc4->hvs, chan);
}

void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									 crtc);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned int channel = vc4_state->assigned_channel;
	struct drm_plane *plane;
	struct vc4_plane_state *vc4_plane_state;
	bool debug_dump_regs = false;
	bool enable_bg_fill = false;
	u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
	u32 __iomem *dlist_next = dlist_start;
	unsigned int zpos = 0;
	bool found = false;
	int idx;

	if (!drm_dev_enter(dev, &idx)) {
		vc4_crtc_send_vblank(crtc);
		return;
	}

	if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
		goto exit;

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

	/* Copy all the active planes' dlist contents to the hardware dlist. */
	do {
		found = false;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			if (plane->state->normalized_zpos != zpos)
				continue;

			/* Is this the first active plane? */
			if (dlist_next == dlist_start) {
				/* We need to enable background fill when a plane
				 * could be alpha blending from the background, i.e.
				 * where no other plane is underneath. It suffices to
				 * consider the first active plane here since we set
				 * needs_bg_fill such that either the first plane
				 * already needs it or all planes on top blend from
				 * the first or a lower plane.
				 */
				vc4_plane_state = to_vc4_plane_state(plane->state);
				enable_bg_fill = vc4_plane_state->needs_bg_fill;
			}

			found = true;

			dlist_next += vc4_plane_write_dlist(plane, dlist_next);
		}

		zpos++;
	} while (found);

	writel(SCALER_CTL0_END, dlist_next);
	dlist_next++;

	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);

	if (enable_bg_fill)
		/* This sets a black background color fill, as is the case
		 * with other DRM drivers.
		 */
		HVS_WRITE(SCALER_DISPBKGNDX(channel),
			  HVS_READ(SCALER_DISPBKGNDX(channel)) |
			  SCALER_DISPBKGND_FILL);

	/* Only update DISPLIST if the CRTC was already running and is not
	 * being disabled.
	 * vc4_crtc_enable() takes care of updating the dlist just after
	 * re-enabling VBLANK interrupts and before enabling the engine.
	 * If the CRTC is being disabled, there's no point in updating this
	 * information.
	 */
	if (crtc->state->active && old_state->active) {
		vc4_hvs_install_dlist(crtc);
		vc4_hvs_update_dlist(crtc);
	}

	if (crtc->state->color_mgmt_changed) {
		u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));

		if (crtc->state->gamma_lut) {
			if (!vc4->is_vc5) {
				vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
				dispbkgndx |= SCALER_DISPBKGND_GAMMA;
			} else {
				vc5_hvs_update_gamma_lut(hvs, vc4_crtc);
			}
		} else {
			/* Unsetting DISPBKGND_GAMMA skips the gamma lut step
			 * in hardware, which is the same as a linear lut that
			 * DRM expects us to use in the absence of a user lut.
			 *
			 * Do NOT change this state dynamically for hvs5, as it
			 * inserts a delay in the pipeline that will cause
			 * stalls if enabled/disabled whilst running. The other
			 * paths should already be disabling/enabling the
			 * pipeline when the gamma state changes.
			 */
			if (!vc4->is_vc5)
				dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
		}
		HVS_WRITE(SCALER_DISPBKGNDX(channel), dispbkgndx);
	}

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

exit:
	drm_dev_exit(idx);
}

void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
	struct drm_device *drm = &hvs->vc4->base;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
					 SCALER_DISPCTRL_DSPEISLUR(channel));

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	drm_dev_exit(idx);
}

void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
	struct drm_device *drm = &hvs->vc4->base;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
					SCALER_DISPCTRL_DSPEISLUR(channel));

	HVS_WRITE(SCALER_DISPSTAT,
		  SCALER_DISPSTAT_EUFLOW(channel));
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	drm_dev_exit(idx);
}

static void vc4_hvs_report_underrun(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	atomic_inc(&vc4->underrun);
	DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
}

static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
{
	struct drm_device *dev = data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	irqreturn_t irqret = IRQ_NONE;
	int channel;
	u32 control;
	u32 status;
	u32 dspeislur;

	/*
	 * NOTE: We don't need to protect the register access using
	 * drm_dev_enter() here because the interrupt handler lifetime
	 * is tied to the device itself, and not to the DRM device.
	 *
	 * So when the device goes away, one of the first things we do
	 * is unregister the interrupt handler, and only then unregister
	 * the DRM device. drm_dev_enter() would thus always succeed if
	 * we are here.
	 */

	status = HVS_READ(SCALER_DISPSTAT);
	control = HVS_READ(SCALER_DISPCTRL);

	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
		dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
					  SCALER_DISPCTRL_DSPEISLUR(channel);

		/* Interrupt masking is not always honored, so check it here. */
		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
		    control & dspeislur) {
			vc4_hvs_mask_underrun(hvs, channel);
			vc4_hvs_report_underrun(dev);

			irqret = IRQ_HANDLED;
		}
	}

	/* Clear every per-channel interrupt flag. */
	HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
				   SCALER_DISPSTAT_IRQMASK(1) |
				   SCALER_DISPSTAT_IRQMASK(2));

	return irqret;
}

int vc4_hvs_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *drm = minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = vc4->hvs;
	int ret;

	if (vc4->firmware_kms)
		return 0;

	if (!vc4->hvs)
		return -ENODEV;

	if (!vc4->is_vc5)
		debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    &vc4->load_tracker_enabled);

	if (vc4->is_vc5)
		vc4_debugfs_add_file(minor, "hvs_gamma",
				     vc5_hvs_debugfs_gamma, NULL);

	ret = vc4_debugfs_add_file(minor, "hvs_underrun",
				   vc4_hvs_debugfs_underrun, NULL);
	if (ret)
		return ret;

	ret = vc4_debugfs_add_regset32(minor, "hvs_regs",
				       &hvs->regset);
	if (ret)
		return ret;

	ret = vc4_debugfs_add_file(minor, "hvs_dlists",
				   vc4_hvs_debugfs_dlist, NULL);
	if (ret)
		return ret;

	return 0;
}

static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = NULL;
	int ret;
	u32 dispctrl;
	u32 reg, top;

	hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
	if (!hvs)
		return -ENOMEM;

	hvs->vc4 = vc4;
	hvs->pdev = pdev;

	hvs->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(hvs->regs))
		return PTR_ERR(hvs->regs);

	hvs->regset.base = hvs->regs;
	hvs->regset.regs = hvs_regs;
	hvs->regset.nregs = ARRAY_SIZE(hvs_regs);

	if (vc4->is_vc5) {
		unsigned long min_rate;
		unsigned long max_rate;

		hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(hvs->core_clk)) {
			dev_err(&pdev->dev, "Couldn't get core clock\n");
			return PTR_ERR(hvs->core_clk);
		}

		max_rate = clk_get_max_rate(hvs->core_clk);
		if (max_rate >= 550000000)
			hvs->vc5_hdmi_enable_scrambling = true;

		min_rate = clk_get_min_rate(hvs->core_clk);
		if (min_rate >= 600000000)
			hvs->vc5_hdmi_enable_4096by2160 = true;

		ret = clk_prepare_enable(hvs->core_clk);
		if (ret) {
			dev_err(&pdev->dev, "Couldn't enable the core clock\n");
			return ret;
		}
	}

	if (!vc4->is_vc5)
		hvs->dlist = hvs->regs + SCALER_DLIST_START;
	else
		hvs->dlist = hvs->regs + SCALER5_DLIST_START;

	spin_lock_init(&hvs->mm_lock);

	/* Set up the HVS display list memory manager. We never
	 * overwrite the setup from the bootloader (just 128b out of
	 * our 16K), since we don't want to scramble the screen when
	 * transitioning from the firmware's boot setup to runtime.
	 */
	drm_mm_init(&hvs->dlist_mm,
		    HVS_BOOTLOADER_DLIST_END,
		    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
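
	/* (SCALER_DLIST_SIZE >> 2) converts the dlist size from bytes to
	 * 32-bit words, so the manager covers the whole dlist RAM minus
	 * the bootloader's words (128 bytes is 32 words).
	 */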

	/* Set up the HVS LBM memory manager. We could have some more
	 * complicated data structure that allowed reuse of LBM areas
	 * between planes when they don't overlap on the screen, but
	 * for now we just allocate globally.
	 */
	if (!vc4->is_vc5)
		/* 48k words of 2x12-bit pixels */
		drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
	else
		/* 60k words of 4x12-bit pixels */
		drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);

	/* Upload filter kernels. We only have the one for now, so we
	 * keep it around for the lifetime of the driver.
	 */
	ret = vc4_hvs_upload_linear_kernel(hvs,
					   &hvs->mitchell_netravali_filter,
					   mitchell_netravali_1_3_1_3_kernel);
	if (ret)
		return ret;

	vc4->hvs = hvs;

	reg = HVS_READ(SCALER_DISPECTRL);
	reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
	HVS_WRITE(SCALER_DISPECTRL,
		  reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX));

	reg = HVS_READ(SCALER_DISPCTRL);
	reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
	HVS_WRITE(SCALER_DISPCTRL,
		  reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX));

	reg = HVS_READ(SCALER_DISPEOLN);
	reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK;
	HVS_WRITE(SCALER_DISPEOLN,
		  reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX));

	reg = HVS_READ(SCALER_DISPDITHER);
	reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK;
	HVS_WRITE(SCALER_DISPDITHER,
		  reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));

	dispctrl = HVS_READ(SCALER_DISPCTRL);

	dispctrl |= SCALER_DISPCTRL_ENABLE;
	dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
		    SCALER_DISPCTRL_DISPEIRQ(1) |
		    SCALER_DISPCTRL_DISPEIRQ(2);

	if (!vc4->is_vc5)
		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
			      SCALER_DISPCTRL_SLVWREIRQ |
			      SCALER_DISPCTRL_SLVRDEIRQ |
			      SCALER_DISPCTRL_DSPEIEOF(0) |
			      SCALER_DISPCTRL_DSPEIEOF(1) |
			      SCALER_DISPCTRL_DSPEIEOF(2) |
			      SCALER_DISPCTRL_DSPEIEOLN(0) |
			      SCALER_DISPCTRL_DSPEIEOLN(1) |
			      SCALER_DISPCTRL_DSPEIEOLN(2) |
			      SCALER_DISPCTRL_DSPEISLUR(0) |
			      SCALER_DISPCTRL_DSPEISLUR(1) |
			      SCALER_DISPCTRL_DSPEISLUR(2) |
			      SCALER_DISPCTRL_SCLEIRQ);
	else
		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
			      SCALER5_DISPCTRL_SLVEIRQ |
			      SCALER5_DISPCTRL_DSPEIEOF(0) |
			      SCALER5_DISPCTRL_DSPEIEOF(1) |
			      SCALER5_DISPCTRL_DSPEIEOF(2) |
			      SCALER5_DISPCTRL_DSPEIEOLN(0) |
			      SCALER5_DISPCTRL_DSPEIEOLN(1) |
			      SCALER5_DISPCTRL_DSPEIEOLN(2) |
			      SCALER5_DISPCTRL_DSPEISLUR(0) |
			      SCALER5_DISPCTRL_DSPEISLUR(1) |
			      SCALER5_DISPCTRL_DSPEISLUR(2) |
			      SCALER_DISPCTRL_SCLEIRQ);

	/* Set AXI panic mode.
	 * VC4 panics when fewer than 2 lines are in the FIFO.
	 * VC5 panics when fewer than 1 line is in the FIFO.
	 */
	dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
		      SCALER_DISPCTRL_PANIC1_MASK |
		      SCALER_DISPCTRL_PANIC2_MASK);
	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	/* Recompute Composite Output Buffer (COB) allocations for the
	 * displays.
	 */
	if (!vc4->is_vc5) {
		/* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
		 * The bottom 2048 pixels are full 32bpp RGBA (intended for the
		 * TXP composing RGBA to memory), whilst the remainder are only
		 * 24bpp RGB.
		 *
		 * Assign 3 lines to channels 1 & 2, and just over 4 lines to
		 * channel 0.
		 */
#define VC4_COB_SIZE		20736
#define VC4_COB_LINE_WIDTH	2048
#define VC4_COB_NUM_LINES	3
		reg = 0;
		top = VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE2, reg);
		reg = top;
		top += VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE1, reg);
		reg = top;
		top = VC4_COB_SIZE;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE0, reg);
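
		/* That is: channels 2 and 1 each get 3 * 2048 = 6144
		 * pixels, leaving 20736 - 2 * 6144 = 8448 pixels (just
		 * over 4 lines) for channel 0.
		 */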
	} else {
		/* The COB is 44416 pixels, or 10.8 lines at 4096 wide.
		 * The bottom 4096 pixels are full RGBA (intended for the TXP
		 * composing RGBA to memory), whilst the remainder are only
		 * RGB. Addressing is always pixel wide.
		 *
		 * Assign 3 lines of 4096 to channels 1 & 2, and just over 4
		 * lines to channel 0.
		 */
#define VC5_COB_SIZE		44416
#define VC5_COB_LINE_WIDTH	4096
#define VC5_COB_NUM_LINES	3
		reg = 0;
		top = VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE2, reg);
		reg = top;
		top += VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE1, reg);
		reg = top;
		top = VC5_COB_SIZE;
		reg |= (top - 1) << 16;
		HVS_WRITE(SCALER_DISPBASE0, reg);
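
		/* Likewise: 3 * 4096 = 12288 pixels each for channels 2
		 * and 1, and 44416 - 2 * 12288 = 19840 pixels (about 4.8
		 * lines) for channel 0.
		 */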
	}

	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
			       vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
	if (ret)
		return ret;

	return 0;
}

static void vc4_hvs_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_mm_node *node, *next;

	if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
		drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);

	drm_mm_for_each_node_safe(node, next, &vc4->hvs->dlist_mm)
		drm_mm_remove_node(node);

	drm_mm_takedown(&vc4->hvs->dlist_mm);

	drm_mm_for_each_node_safe(node, next, &vc4->hvs->lbm_mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&vc4->hvs->lbm_mm);

	clk_disable_unprepare(hvs->core_clk);

	vc4->hvs = NULL;
}

static const struct component_ops vc4_hvs_ops = {
	.bind = vc4_hvs_bind,
	.unbind = vc4_hvs_unbind,
};

static int vc4_hvs_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_hvs_ops);
}

static int vc4_hvs_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_hvs_ops);
	return 0;
}

static const struct of_device_id vc4_hvs_dt_match[] = {
	{ .compatible = "brcm,bcm2711-hvs" },
	{ .compatible = "brcm,bcm2835-hvs" },
	{}
};

struct platform_driver vc4_hvs_driver = {
	.probe = vc4_hvs_dev_probe,
	.remove = vc4_hvs_dev_remove,
	.driver = {
		.name = "vc4_hvs",
		.of_match_table = vc4_hvs_dt_match,
	},
};