drm/vc4: kms: Add functions to create the state objects
drivers/gpu/drm/vc4/vc4_kms.c (platform/kernel/linux-rpi.git)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_drv.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3

struct vc4_ctm_state {
        struct drm_private_state base;
        struct drm_color_ctm *ctm;
        int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
        struct drm_private_state base;
        u64 hvs_load;
        u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
        return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
                                               struct drm_private_obj *manager)
{
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_private_state *priv_state;
        int ret;

        ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
        if (ret)
                return ERR_PTR(ret);

        priv_state = drm_atomic_get_private_obj_state(state, manager);
        if (IS_ERR(priv_state))
                return ERR_CAST(priv_state);

        return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
        struct vc4_ctm_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

        return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
                                  struct drm_private_state *state)
{
        struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

        kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
        .atomic_duplicate_state = vc4_ctm_duplicate_state,
        .atomic_destroy_state = vc4_ctm_destroy_state,
};

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
        struct vc4_ctm_state *ctm_state;

        drm_modeset_lock_init(&vc4->ctm_state_lock);

        ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
        if (!ctm_state)
                return -ENOMEM;

        drm_atomic_private_obj_init(vc4->dev, &vc4->ctm_manager, &ctm_state->base,
                        &vc4_ctm_state_funcs);

        return 0;
}

static void vc4_ctm_obj_fini(struct vc4_dev *vc4)
{
        drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
        u16 r;

        /* Sign bit. */
        r = in & BIT_ULL(63) ? BIT(9) : 0;

        if ((in & GENMASK_ULL(62, 32)) > 0) {
                /* We have zero integer bits so we can only saturate here. */
                r |= GENMASK(8, 0);
        } else {
                /* Otherwise take the 9 most important fractional bits. */
                r |= (in >> 23) & GENMASK(8, 0);
        }

        return r;
}
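
/*
 * Worked examples for the conversion above, derived directly from the
 * code:
 *
 *    0.5 (S31.32 0x0000000080000000) -> 0x100, i.e. 256/512 = 0.5
 *   -1.0 (S31.32 0x8000000100000000) -> 0x3ff: the integer part is
 *        non-zero, so the magnitude saturates to 511/512, with the
 *        sign bit (BIT(9)) set.
 */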

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
        struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
        struct drm_color_ctm *ctm = ctm_state->ctm;

        if (vc4->firmware_kms)
                return;

        if (ctm_state->fifo) {
                HVS_WRITE(SCALER_OLEDCOEF2,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
                                        SCALER_OLEDCOEF2_R_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
                                        SCALER_OLEDCOEF2_R_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
                                        SCALER_OLEDCOEF2_R_TO_B));
                HVS_WRITE(SCALER_OLEDCOEF1,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
                                        SCALER_OLEDCOEF1_G_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
                                        SCALER_OLEDCOEF1_G_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
                                        SCALER_OLEDCOEF1_G_TO_B));
                HVS_WRITE(SCALER_OLEDCOEF0,
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
                                        SCALER_OLEDCOEF0_B_TO_R) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
                                        SCALER_OLEDCOEF0_B_TO_G) |
                          VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
                                        SCALER_OLEDCOEF0_B_TO_B));
        }

        HVS_WRITE(SCALER_OLEDOFFS,
                  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
                                     struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        unsigned int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
                u32 dispctrl;
                u32 dsp3_mux;

                if (!crtc_state->active)
                        continue;

                if (vc4_state->assigned_channel != 2)
                        continue;

                /*
                 * SCALER_DISPCTRL_DSP3 = X, where X < 3, means 'connect
                 * DSP3 to FIFO X'.
                 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
                 *
                 * DSP3 is connected to FIFO2 unless the transposer is
                 * enabled. In this case, FIFO 2 is directly accessed by the
                 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
                 * route.
                 */
                if (vc4_state->feed_txp)
                        dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
                else
                        dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

                dispctrl = HVS_READ(SCALER_DISPCTRL) &
                           ~SCALER_DISPCTRL_DSP3_MUX_MASK;
                HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
        }
}

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
                                     struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        unsigned char dsp2_mux = 0;
        unsigned char dsp3_mux = 3;
        unsigned char dsp4_mux = 3;
        unsigned char dsp5_mux = 3;
        unsigned int i;
        u32 reg;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
                struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

                if (!crtc_state->active)
                        continue;

                switch (vc4_crtc->data->hvs_output) {
                case 2:
                        dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
                        break;

                case 3:
                        dsp3_mux = vc4_state->assigned_channel;
                        break;

                case 4:
                        dsp4_mux = vc4_state->assigned_channel;
                        break;

                case 5:
                        dsp5_mux = vc4_state->assigned_channel;
                        break;

                default:
                        break;
                }
        }

        reg = HVS_READ(SCALER_DISPECTRL);
        HVS_WRITE(SCALER_DISPECTRL,
                  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
                  VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));

        reg = HVS_READ(SCALER_DISPCTRL);
        HVS_WRITE(SCALER_DISPCTRL,
                  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
                  VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));

        reg = HVS_READ(SCALER_DISPEOLN);
        HVS_WRITE(SCALER_DISPEOLN,
                  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
                  VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));

        reg = HVS_READ(SCALER_DISPDITHER);
        HVS_WRITE(SCALER_DISPDITHER,
                  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
                  VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
}

static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_hvs *hvs = vc4->hvs;
        struct vc4_crtc *vc4_crtc;
        int i;

        for (i = 0; vc4->hvs && i < dev->mode_config.num_crtc; i++) {
                struct __drm_crtcs_state *_state = &state->crtcs[i];
                struct vc4_crtc_state *vc4_crtc_state;

                if (!_state->ptr || !_state->commit)
                        continue;

                vc4_crtc = to_vc4_crtc(_state->ptr);
                vc4_crtc_state = to_vc4_crtc_state(_state->state);
                vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
        }

        if (vc4->hvs && vc4->hvs->hvs5)
                clk_set_min_rate(hvs->core_clk, 500000000);

        drm_atomic_helper_wait_for_fences(dev, state, false);

        drm_atomic_helper_wait_for_dependencies(state);

        drm_atomic_helper_commit_modeset_disables(dev, state);

        vc4_ctm_commit(vc4, state);

        if (vc4->hvs && vc4->hvs->hvs5)
                vc5_hvs_pv_muxing_commit(vc4, state);
        else
                vc4_hvs_pv_muxing_commit(vc4, state);

        drm_atomic_helper_commit_planes(dev, state, 0);

        drm_atomic_helper_commit_modeset_enables(dev, state);

        drm_atomic_helper_fake_vblank(state);

        drm_atomic_helper_commit_hw_done(state);

        drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        drm_atomic_helper_commit_cleanup_done(state);

        drm_atomic_state_put(state);

        up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
        struct drm_atomic_state *state = container_of(work,
                                                      struct drm_atomic_state,
                                                      commit_work);
        vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). It can still fail when e.g. the framebuffer
 * reservation fails. Nonblocking commits are completed asynchronously from a
 * worker.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool nonblock)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;

        if (state->async_update) {
                ret = down_interruptible(&vc4->async_modeset);
                if (ret)
                        return ret;

                ret = drm_atomic_helper_prepare_planes(dev, state);
                if (ret) {
                        up(&vc4->async_modeset);
                        return ret;
                }

                drm_atomic_helper_async_commit(dev, state);

                drm_atomic_helper_cleanup_planes(dev, state);

                up(&vc4->async_modeset);

                return 0;
        }

        /* We know for sure we don't want an async update here. Set
         * state->legacy_cursor_update to false to prevent
         * drm_atomic_helper_setup_commit() from auto-completing
         * commit->flip_done.
         */
        if (!vc4->firmware_kms)
                state->legacy_cursor_update = false;
        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                return ret;

        INIT_WORK(&state->commit_work, commit_work);

        ret = down_interruptible(&vc4->async_modeset);
        if (ret)
                return ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret) {
                up(&vc4->async_modeset);
                return ret;
        }

        if (!nonblock) {
                ret = drm_atomic_helper_wait_for_fences(dev, state, true);
                if (ret) {
                        drm_atomic_helper_cleanup_planes(dev, state);
                        up(&vc4->async_modeset);
                        return ret;
                }
        }

        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
         * the software side now.
         */

        BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

        /*
         * Everything below can be run asynchronously without the need to grab
         * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
         * before the new state gets committed on the software side with
         * drm_atomic_helper_swap_state().
         *
         * This scheme allows new atomic state updates to be prepared and
         * checked in parallel to the asynchronous completion of the previous
         * update. Which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
         */

        drm_atomic_state_get(state);
        if (nonblock)
                queue_work(system_unbound_wq, &state->commit_work);
        else
                vc4_atomic_complete_commit(state);

        return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
                                             struct drm_file *file_priv,
                                             const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_mode_fb_cmd2 mode_cmd_local;

        /* If the user didn't specify a modifier, use the
         * vc4_set_tiling_ioctl() state for the BO.
         */
        if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
                struct drm_gem_object *gem_obj;
                struct vc4_bo *bo;

                gem_obj = drm_gem_object_lookup(file_priv,
                                                mode_cmd->handles[0]);
                if (!gem_obj) {
                        DRM_DEBUG("Failed to look up GEM BO %d\n",
                                  mode_cmd->handles[0]);
                        return ERR_PTR(-ENOENT);
                }
                bo = to_vc4_bo(gem_obj);

                mode_cmd_local = *mode_cmd;

                if (bo->t_format) {
                        mode_cmd_local.modifier[0] =
                                DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
                } else {
                        mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
                }

                drm_gem_object_put_unlocked(gem_obj);

                mode_cmd = &mode_cmd_local;
        }

        return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_ctm_state *ctm_state = NULL;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_color_ctm *ctm;
        int i, j;

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                /* CTM is being disabled. */
                if (!new_crtc_state->ctm && old_crtc_state->ctm) {
                        ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
                        if (IS_ERR(ctm_state))
                                return PTR_ERR(ctm_state);
                        ctm_state->fifo = 0;
                }
        }

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                if (new_crtc_state->ctm == old_crtc_state->ctm)
                        continue;

                if (!ctm_state) {
                        ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
                        if (IS_ERR(ctm_state))
                                return PTR_ERR(ctm_state);
                }

                /* CTM is being enabled or the matrix changed. */
                if (new_crtc_state->ctm) {
                        struct vc4_crtc_state *vc4_crtc_state =
                                to_vc4_crtc_state(new_crtc_state);

                        /* fifo is 1-based since 0 disables CTM. */
                        int fifo = vc4_crtc_state->assigned_channel + 1;

                        /* Check userland isn't trying to turn on CTM for more
                         * than one CRTC at a time.
                         */
                        if (ctm_state->fifo && ctm_state->fifo != fifo) {
                                DRM_DEBUG_DRIVER("Too many CTMs configured\n");
                                return -EINVAL;
                        }

                        /* Check we can approximate the specified CTM.
                         * We disallow scalars |c| > 1.0 since the HW has
                         * no integer bits.
                         */
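                        /* A note on the bound below: 1.0 in the S31.32
                         * sign-magnitude layout is BIT_ULL(32), so once
                         * the sign bit is masked off, any magnitude above
                         * that has a non-zero integer part and can't be
                         * represented in S0.9.
                         */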
                        ctm = new_crtc_state->ctm->data;
                        /* Use a dedicated index: 'i' is still driving the
                         * CRTC iterator above.
                         */
                        for (j = 0; j < ARRAY_SIZE(ctm->matrix); j++) {
                                u64 val = ctm->matrix[j];

                                val &= ~BIT_ULL(63);
                                if (val > BIT_ULL(32))
                                        return -EINVAL;
                        }

                        ctm_state->fifo = fifo;
                        ctm_state->ctm = ctm;
                }
        }

        return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
        struct vc4_load_tracker_state *load_state;
        struct drm_private_state *priv_state;
        struct drm_plane *plane;
        int i;

        if (!vc4->load_tracker_available)
                return 0;

        priv_state = drm_atomic_get_private_obj_state(state,
                                                      &vc4->load_tracker);
        if (IS_ERR(priv_state))
                return PTR_ERR(priv_state);

        load_state = to_vc4_load_tracker_state(priv_state);
        for_each_oldnew_plane_in_state(state, plane, old_plane_state,
                                       new_plane_state, i) {
                struct vc4_plane_state *vc4_plane_state;

                if (old_plane_state->fb && old_plane_state->crtc) {
                        vc4_plane_state = to_vc4_plane_state(old_plane_state);
                        load_state->membus_load -= vc4_plane_state->membus_load;
                        load_state->hvs_load -= vc4_plane_state->hvs_load;
                }

                if (new_plane_state->fb && new_plane_state->crtc) {
                        vc4_plane_state = to_vc4_plane_state(new_plane_state);
                        load_state->membus_load += vc4_plane_state->membus_load;
                        load_state->hvs_load += vc4_plane_state->hvs_load;
                }
        }

        /* Don't check the load when the tracker is disabled. */
        if (!vc4->load_tracker_enabled)
                return 0;

        /* The absolute limit is 2 GByte/sec, but we take a margin and check
         * against 1.5 GByte/sec (SZ_1G + SZ_512M) to let the system work
         * when other blocks are accessing the memory.
         */
        if (load_state->membus_load > SZ_1G + SZ_512M)
                return -ENOSPC;

        /* The HVS clock is supposed to run at 250 MHz; take a margin and
         * cap the load at 240M cycles per second.
         */
        if (load_state->hvs_load > 240000000ULL)
                return -ENOSPC;

        return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
        struct vc4_load_tracker_state *state;

        state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

        return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
                                           struct drm_private_state *state)
{
        struct vc4_load_tracker_state *load_state;

        load_state = to_vc4_load_tracker_state(state);
        kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
        .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
        .atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
        struct vc4_load_tracker_state *load_state;

        if (!vc4->load_tracker_available)
                return 0;

        load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
        if (!load_state)
                return -ENOMEM;

        drm_atomic_private_obj_init(vc4->dev, &vc4->load_tracker,
                        &load_state->base,
                        &vc4_load_tracker_state_funcs);

        return 0;
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the available FIFOs pool. However, there are a few corner cases
 * that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
                                      struct drm_atomic_state *state)
{
        unsigned long unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        unsigned int i;

        /*
         * Since the HVS FIFOs are shared across all the pixelvalves and
         * the TXP (and thus all the CRTCs), we need to pull the current
         * state of all the enabled CRTCs so that an update to a single
         * CRTC still keeps the previous FIFOs enabled and assigned to
         * the same CRTCs, instead of evaluating only the CRTC being
         * modified.
         */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct drm_crtc_state *crtc_state;

                if (!crtc->state->enable)
                        continue;

                crtc_state = drm_atomic_get_crtc_state(state, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);
        }

        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct vc4_crtc_state *new_vc4_crtc_state =
                        to_vc4_crtc_state(new_crtc_state);
                struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
                bool is_assigned = false;
                unsigned int channel;

                if (old_crtc_state->enable && !new_crtc_state->enable)
                        new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;

                if (!new_crtc_state->enable)
                        continue;

                if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
                        unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
                        continue;
                }

                /*
                 * The problem we have to solve here is that we have
                 * up to 7 encoders, connected to up to 6 CRTCs.
                 *
                 * Those CRTCs, depending on the instance, can be
                 * routed to 1, 2 or 3 HVS FIFOs, and we need to
                 * change the muxing between FIFOs and outputs in the
                 * HVS accordingly.
                 *
                 * It would be pretty hard to come up with an
                 * algorithm that would generically solve
                 * this. However, the current routing trees we support
                 * allow us to simplify the problem a bit.
                 *
                 * Indeed, with the currently supported layouts, if we
                 * assign the FIFOs in ascending CRTC index order, we
                 * can't fall into the situation where an earlier CRTC
                 * that had multiple routes is assigned the one that
                 * was the only option for a later CRTC.
                 *
                 * If the layout changes and doesn't give us that in
                 * the future, we will need to have something smarter,
                 * but it works so far.
                 */
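                /*
                 * For instance, if an earlier CRTC could use any of
                 * FIFOs 0-2 while a later one is wired to FIFO 1 only,
                 * ascending assignment hands the earlier CRTC FIFO 0
                 * and keeps FIFO 1 free for the later one.
                 */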
                for_each_set_bit(channel, &unassigned_channels,
                                 sizeof(unassigned_channels) * 8) {

                        if (!(BIT(channel) & vc4_crtc->data->hvs_available_channels))
                                continue;

                        new_vc4_crtc_state->assigned_channel = channel;
                        unassigned_channels &= ~BIT(channel);
                        is_assigned = true;
                        break;
                }

                if (!is_assigned)
                        return -EINVAL;
        }

        return 0;
}

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
        int ret;

        ret = vc4_pv_muxing_atomic_check(dev, state);
        if (ret)
                return ret;

        ret = vc4_ctm_atomic_check(dev, state);
        if (ret < 0)
                return ret;

        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
        .atomic_check = vc4_atomic_check,
        .atomic_commit = vc4_atomic_commit,
        .fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        int ret;

        if (!of_device_is_compatible(dev->dev->of_node, "brcm,bcm2711-vc5")) {
                vc4->load_tracker_available = true;

                /* Start with the load tracker enabled. Can be
                 * disabled through the debugfs load_tracker file.
                 */
                vc4->load_tracker_enabled = true;
        }

        sema_init(&vc4->async_modeset, 1);

        /* Set support for vblank irq fast disable, before drm_vblank_init() */
        dev->vblank_disable_immediate = true;

        dev->irq_enabled = true;
        ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
                return ret;
        }

        if (!drm_core_check_feature(dev, DRIVER_RENDER)) {
                /* No V3D as part of vc4. Assume this is Pi4. */
                dev->mode_config.max_width = 7680;
                dev->mode_config.max_height = 7680;
        } else {
                dev->mode_config.max_width = 2048;
                dev->mode_config.max_height = 2048;
        }
        dev->mode_config.funcs = &vc4_mode_funcs;
        dev->mode_config.preferred_depth = 24;
        dev->mode_config.async_page_flip = true;
        dev->mode_config.allow_fb_modifiers = true;
        dev->mode_config.normalize_zpos = true;

        ret = vc4_ctm_obj_init(vc4);
        if (ret)
                return ret;

        ret = vc4_load_tracker_obj_init(vc4);
        if (ret)
                goto ctm_fini;

        drm_mode_config_reset(dev);

        drm_kms_helper_poll_init(dev);

        return 0;

ctm_fini:
        vc4_ctm_obj_fini(vc4);

        return ret;
}