Merge tag 'drm-misc-next-2020-01-02' of git://anongit.freedesktop.org/drm/drm-misc...
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / arm / display / komeda / d71 / d71_dev.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
4  * Author: James.Qian.Wang <james.qian.wang@arm.com>
5  *
6  */
7
8 #include <drm/drm_print.h>
9 #include "d71_dev.h"
10 #include "malidp_io.h"
11
12 static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
13 {
14         u32 __iomem *reg = d71_pipeline->lpu_addr;
15         u32 status, raw_status;
16         u64 evts = 0ULL;
17
18         raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
19         if (raw_status & LPU_IRQ_IBSY)
20                 evts |= KOMEDA_EVENT_IBSY;
21         if (raw_status & LPU_IRQ_EOW)
22                 evts |= KOMEDA_EVENT_EOW;
23         if (raw_status & LPU_IRQ_OVR)
24                 evts |= KOMEDA_EVENT_OVR;
25
26         if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY | LPU_IRQ_OVR)) {
27                 u32 restore = 0, tbu_status;
28                 /* Check error of LPU status */
29                 status = malidp_read32(reg, BLK_STATUS);
30                 if (status & LPU_STATUS_AXIE) {
31                         restore |= LPU_STATUS_AXIE;
32                         evts |= KOMEDA_ERR_AXIE;
33                 }
34                 if (status & LPU_STATUS_ACE0) {
35                         restore |= LPU_STATUS_ACE0;
36                         evts |= KOMEDA_ERR_ACE0;
37                 }
38                 if (status & LPU_STATUS_ACE1) {
39                         restore |= LPU_STATUS_ACE1;
40                         evts |= KOMEDA_ERR_ACE1;
41                 }
42                 if (status & LPU_STATUS_ACE2) {
43                         restore |= LPU_STATUS_ACE2;
44                         evts |= KOMEDA_ERR_ACE2;
45                 }
46                 if (status & LPU_STATUS_ACE3) {
47                         restore |= LPU_STATUS_ACE3;
48                         evts |= KOMEDA_ERR_ACE3;
49                 }
50                 if (status & LPU_STATUS_FEMPTY) {
51                         restore |= LPU_STATUS_FEMPTY;
52                         evts |= KOMEDA_EVENT_EMPTY;
53                 }
54                 if (status & LPU_STATUS_FFULL) {
55                         restore |= LPU_STATUS_FFULL;
56                         evts |= KOMEDA_EVENT_FULL;
57                 }
58
59                 if (restore != 0)
60                         malidp_write32_mask(reg, BLK_STATUS, restore, 0);
61
62                 restore = 0;
63                 /* Check errors of TBU status */
64                 tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
65                 if (tbu_status & LPU_TBU_STATUS_TCF) {
66                         restore |= LPU_TBU_STATUS_TCF;
67                         evts |= KOMEDA_ERR_TCF;
68                 }
69                 if (tbu_status & LPU_TBU_STATUS_TTNG) {
70                         restore |= LPU_TBU_STATUS_TTNG;
71                         evts |= KOMEDA_ERR_TTNG;
72                 }
73                 if (tbu_status & LPU_TBU_STATUS_TITR) {
74                         restore |= LPU_TBU_STATUS_TITR;
75                         evts |= KOMEDA_ERR_TITR;
76                 }
77                 if (tbu_status & LPU_TBU_STATUS_TEMR) {
78                         restore |= LPU_TBU_STATUS_TEMR;
79                         evts |= KOMEDA_ERR_TEMR;
80                 }
81                 if (tbu_status & LPU_TBU_STATUS_TTF) {
82                         restore |= LPU_TBU_STATUS_TTF;
83                         evts |= KOMEDA_ERR_TTF;
84                 }
85                 if (restore != 0)
86                         malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
87         }
88
89         malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
90         return evts;
91 }
92
93 static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
94 {
95         u32 __iomem *reg = d71_pipeline->cu_addr;
96         u32 status, raw_status;
97         u64 evts = 0ULL;
98
99         raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
100         if (raw_status & CU_IRQ_OVR)
101                 evts |= KOMEDA_EVENT_OVR;
102
103         if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
104                 status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
105                 if (status & CU_STATUS_CPE)
106                         evts |= KOMEDA_ERR_CPE;
107                 if (status & CU_STATUS_ZME)
108                         evts |= KOMEDA_ERR_ZME;
109                 if (status & CU_STATUS_CFGE)
110                         evts |= KOMEDA_ERR_CFGE;
111                 if (status)
112                         malidp_write32_mask(reg, BLK_STATUS, status, 0);
113         }
114
115         malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
116
117         return evts;
118 }
119
120 static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
121 {
122         u32 __iomem *reg = d71_pipeline->dou_addr;
123         u32 status, raw_status;
124         u64 evts = 0ULL;
125
126         raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
127         if (raw_status & DOU_IRQ_PL0)
128                 evts |= KOMEDA_EVENT_VSYNC;
129         if (raw_status & DOU_IRQ_UND)
130                 evts |= KOMEDA_EVENT_URUN;
131
132         if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
133                 u32 restore  = 0;
134
135                 status = malidp_read32(reg, BLK_STATUS);
136                 if (status & DOU_STATUS_DRIFTTO) {
137                         restore |= DOU_STATUS_DRIFTTO;
138                         evts |= KOMEDA_ERR_DRIFTTO;
139                 }
140                 if (status & DOU_STATUS_FRAMETO) {
141                         restore |= DOU_STATUS_FRAMETO;
142                         evts |= KOMEDA_ERR_FRAMETO;
143                 }
144                 if (status & DOU_STATUS_TETO) {
145                         restore |= DOU_STATUS_TETO;
146                         evts |= KOMEDA_ERR_TETO;
147                 }
148                 if (status & DOU_STATUS_CSCE) {
149                         restore |= DOU_STATUS_CSCE;
150                         evts |= KOMEDA_ERR_CSCE;
151                 }
152
153                 if (restore != 0)
154                         malidp_write32_mask(reg, BLK_STATUS, restore, 0);
155         }
156
157         malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
158         return evts;
159 }
160
161 static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
162 {
163         u32 evts = 0ULL;
164
165         if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
166                 evts |= get_lpu_event(d71_pipeline);
167
168         if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
169                 evts |= get_cu_event(d71_pipeline);
170
171         if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
172                 evts |= get_dou_event(d71_pipeline);
173
174         return evts;
175 }
176
/*
 * Top-level D71 interrupt handler: read the global interrupt status,
 * service the GCU's own sources here, and delegate per-pipeline sources
 * to get_pipeline_event(). Collected events are reported via @evts.
 */
static irqreturn_t
d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
{
	struct d71_dev *d71 = mdev->chip_data;
	u32 status, gcu_status, raw_status;

	gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);

	if (gcu_status & GLB_IRQ_STATUS_GCU) {
		raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
		/* CVAL0/CVAL1 raw bits map to per-pipe FLIP events */
		if (raw_status & GCU_IRQ_CVAL0)
			evts->pipes[0] |= KOMEDA_EVENT_FLIP;
		if (raw_status & GCU_IRQ_CVAL1)
			evts->pipes[1] |= KOMEDA_EVENT_FLIP;
		if (raw_status & GCU_IRQ_ERR) {
			status = malidp_read32(d71->gcu_addr, BLK_STATUS);
			if (status & GCU_STATUS_MERR) {
				evts->global |= KOMEDA_ERR_MERR;
				/* clear the handled MERR status bit */
				malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
						    GCU_STATUS_MERR, 0);
			}
		}

		/* acknowledge all raw GCU interrupts we observed */
		malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
	}

	if (gcu_status & GLB_IRQ_STATUS_PIPE0)
		evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);

	if (gcu_status & GLB_IRQ_STATUS_PIPE1)
		evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);

	/* IRQ_HANDLED if any status bit was set, IRQ_NONE otherwise */
	return IRQ_RETVAL(gcu_status);
}
211
/* Interrupt sources unmasked by d71_enable_irq() for each block type. */
#define ENABLED_GCU_IRQS        (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
                                 GCU_IRQ_MODE | GCU_IRQ_ERR)
#define ENABLED_LPU_IRQS        (LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
#define ENABLED_CU_IRQS         (CU_IRQ_OVR | CU_IRQ_ERR)
#define ENABLED_DOU_IRQS        (DOU_IRQ_UND | DOU_IRQ_ERR)
217
218 static int d71_enable_irq(struct komeda_dev *mdev)
219 {
220         struct d71_dev *d71 = mdev->chip_data;
221         struct d71_pipeline *pipe;
222         u32 i;
223
224         malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
225                             ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
226         for (i = 0; i < d71->num_pipelines; i++) {
227                 pipe = d71->pipes[i];
228                 malidp_write32_mask(pipe->cu_addr,  BLK_IRQ_MASK,
229                                     ENABLED_CU_IRQS, ENABLED_CU_IRQS);
230                 malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
231                                     ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
232                 malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
233                                     ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
234         }
235         return 0;
236 }
237
238 static int d71_disable_irq(struct komeda_dev *mdev)
239 {
240         struct d71_dev *d71 = mdev->chip_data;
241         struct d71_pipeline *pipe;
242         u32 i;
243
244         malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
245         for (i = 0; i < d71->num_pipelines; i++) {
246                 pipe = d71->pipes[i];
247                 malidp_write32_mask(pipe->cu_addr,  BLK_IRQ_MASK,
248                                     ENABLED_CU_IRQS, 0);
249                 malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
250                                     ENABLED_LPU_IRQS, 0);
251                 malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
252                                     ENABLED_DOU_IRQS, 0);
253         }
254         return 0;
255 }
256
257 static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
258 {
259         struct d71_dev *d71 = mdev->chip_data;
260         struct d71_pipeline *pipe = d71->pipes[master_pipe];
261
262         malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
263                             DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
264 }
265
266 static int to_d71_opmode(int core_mode)
267 {
268         switch (core_mode) {
269         case KOMEDA_MODE_DISP0:
270                 return DO0_ACTIVE_MODE;
271         case KOMEDA_MODE_DISP1:
272                 return DO1_ACTIVE_MODE;
273         case KOMEDA_MODE_DUAL_DISP:
274                 return DO01_ACTIVE_MODE;
275         case KOMEDA_MODE_INACTIVE:
276                 return INACTIVE_MODE;
277         default:
278                 WARN(1, "Unknown operation mode");
279                 return INACTIVE_MODE;
280         }
281 }
282
283 static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
284 {
285         struct d71_dev *d71 = mdev->chip_data;
286         u32 opmode = to_d71_opmode(new_mode);
287         int ret;
288
289         malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);
290
291         ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
292                            100, 1000, 10000);
293
294         return ret;
295 }
296
297 static void d71_flush(struct komeda_dev *mdev,
298                       int master_pipe, u32 active_pipes)
299 {
300         struct d71_dev *d71 = mdev->chip_data;
301         u32 reg_offset = (master_pipe == 0) ?
302                          GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;
303
304         malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
305 }
306
307 static int d71_reset(struct d71_dev *d71)
308 {
309         u32 __iomem *gcu = d71->gcu_addr;
310         int ret;
311
312         malidp_write32_mask(gcu, BLK_CONTROL,
313                             GCU_CONTROL_SRST, GCU_CONTROL_SRST);
314
315         ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
316                            100, 1000, 10000);
317
318         return ret;
319 }
320
321 void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
322 {
323         int i;
324
325         blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
326         if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
327                 return;
328
329         blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);
330
331         /* get valid input and output ids */
332         for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
333                 blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
334         for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
335                 blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
336 }
337
338 static void d71_cleanup(struct komeda_dev *mdev)
339 {
340         struct d71_dev *d71 = mdev->chip_data;
341
342         if (!d71)
343                 return;
344
345         devm_kfree(mdev->dev, d71);
346         mdev->chip_data = NULL;
347 }
348
/*
 * Probe the D71 hardware and enumerate its resources: reset the device,
 * read the global configuration (block/pipeline counts, line-size and
 * layer capabilities), create the pipeline objects, then walk every
 * register block and probe the non-reserved ones.
 *
 * Returns 0 on success or a negative errno; on failure all chip-private
 * data is released via d71_cleanup().
 */
static int d71_enum_resources(struct komeda_dev *mdev)
{
	struct d71_dev *d71;
	struct komeda_pipeline *pipe;
	struct block_header blk;
	u32 __iomem *blk_base;
	u32 i, value, offset;
	int err;

	d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
	if (!d71)
		return -ENOMEM;

	mdev->chip_data = d71;
	d71->mdev = mdev;
	d71->gcu_addr = mdev->reg_base;
	/* register offsets are byte-based; reg_base is a u32 pointer (>> 2) */
	d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);

	err = d71_reset(d71);
	if (err) {
		DRM_ERROR("Fail to reset d71 device.\n");
		goto err_cleanup;
	}

	/* probe GCU */
	value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
	d71->num_blocks = value & 0xFF;
	d71->num_pipelines = (value >> 8) & 0x7;

	if (d71->num_pipelines > D71_MAX_PIPELINE) {
		DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
			  D71_MAX_PIPELINE, d71->num_pipelines);
		err = -EINVAL;
		goto err_cleanup;
	}

	/* Only the legacy HW has the periph block, the newer merges the periph
	 * into GCU
	 */
	value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
	if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH)
		d71->periph_addr = NULL;

	if (d71->periph_addr) {
		/* probe PERIPHERAL in legacy HW */
		value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);

		d71->max_line_size	= value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
		d71->max_vsize		= 4096;
		d71->num_rich_layers	= value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
		d71->supports_dual_link = !!(value & PERIPH_SPLIT_EN);
		d71->integrates_tbu	= !!(value & PERIPH_TBU_EN);
	} else {
		/* newer HW: read the same capabilities from the GCU */
		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID0);
		d71->max_line_size	= GCU_MAX_LINE_SIZE(value);
		d71->max_vsize		= GCU_MAX_NUM_LINES(value);

		value = malidp_read32(d71->gcu_addr, GCU_CONFIGURATION_ID1);
		d71->num_rich_layers	= GCU_NUM_RICH_LAYERS(value);
		d71->supports_dual_link = GCU_DISPLAY_SPLIT_EN(value);
		d71->integrates_tbu	= GCU_DISPLAY_TBU_EN(value);
	}

	for (i = 0; i < d71->num_pipelines; i++) {
		pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
					   &d71_pipeline_funcs);
		if (IS_ERR(pipe)) {
			err = PTR_ERR(pipe);
			goto err_cleanup;
		}

		/* D71 HW doesn't update shadow registers when display output
		 * is turning off, so when we disable all pipeline components
		 * together with display output disable by one flush or one
		 * operation, the disable operation updated registers will not
		 * be flush to or valid in HW, which may leads problem.
		 * To workaround this problem, introduce a two phase disable.
		 * Phase1: Disabling components with display is on to make sure
		 *         the disable can be flushed to HW.
		 * Phase2: Only turn-off display output.
		 */
		value = KOMEDA_PIPELINE_IMPROCS |
			BIT(KOMEDA_COMPONENT_TIMING_CTRLR);

		pipe->standalone_disabled_comps = value;

		d71->pipes[i] = to_d71_pipeline(pipe);
	}

	/* loop the register blks and probe.
	 * NOTE: d71->num_blocks includes reserved blocks.
	 * d71->num_blocks = GCU + valid blocks + reserved blocks
	 */
	i = 1; /* exclude GCU */
	offset = D71_BLOCK_SIZE; /* skip GCU */
	while (i < d71->num_blocks) {
		blk_base = mdev->reg_base + (offset >> 2);

		d71_read_block_header(blk_base, &blk);
		if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
			err = d71_probe_block(d71, &blk, blk_base);
			if (err)
				goto err_cleanup;
		}

		i++;
		offset += D71_BLOCK_SIZE;
	}

	DRM_DEBUG("total %d (out of %d) blocks are found.\n",
		  i, d71->num_blocks);

	return 0;

err_cleanup:
	d71_cleanup(mdev);
	return err;
}
467
/* Helper macros used to build d71_format_caps_table below:
 * __HW_ID packs a format group and in-group index into one HW format id;
 * the remaining macros are shorthands for layer-type sets, rotation
 * capabilities, AFBC block layouts and AFBC feature combinations.
 */
#define __HW_ID(__group, __format) \
        ((((__group) & 0x7) << 3) | ((__format) & 0x7))

#define RICH            KOMEDA_FMT_RICH_LAYER
#define SIMPLE          KOMEDA_FMT_SIMPLE_LAYER
#define RICH_SIMPLE     (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER)
#define RICH_WB         (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER)
#define RICH_SIMPLE_WB  (RICH_SIMPLE | KOMEDA_FMT_WB_LAYER)

#define Rot_0           DRM_MODE_ROTATE_0
#define Flip_H_V        (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0)
#define Rot_ALL_H_V     (DRM_MODE_ROTATE_MASK | Flip_H_V)

#define LYT_NM          BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16)
#define LYT_WB          BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
#define LYT_NM_WB       (LYT_NM | LYT_WB)

#define AFB_TH          AFBC(_TILED | _SPARSE)
#define AFB_TH_SC_YTR   AFBC(_TILED | _SC | _SPARSE | _YTR)
#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)
488
/* Pixel format capability table for D71. A fourcc may appear twice: once
 * for the linear path and once for the AFBC path (entries marked "afbc").
 */
static struct komeda_format_caps d71_format_caps_table[] = {
	/*   HW_ID    |        fourcc         |   layer_types |   rots    | afbc_layouts | afbc_features */
	/* ABGR_2101010*/
	{__HW_ID(0, 0),	DRM_FORMAT_ARGB2101010,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 1),	DRM_FORMAT_ABGR2101010,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	{__HW_ID(0, 2),	DRM_FORMAT_RGBA1010102,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(0, 3),	DRM_FORMAT_BGRA1010102,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* ABGR_8888*/
	{__HW_ID(1, 0),	DRM_FORMAT_ARGB8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 1),	DRM_FORMAT_ABGR8888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	{__HW_ID(1, 2),	DRM_FORMAT_RGBA8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(1, 3),	DRM_FORMAT_BGRA8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* XBGB_8888 */
	{__HW_ID(2, 0),	DRM_FORMAT_XRGB8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 1),	DRM_FORMAT_XBGR8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 2),	DRM_FORMAT_RGBX8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	{__HW_ID(2, 3),	DRM_FORMAT_BGRX8888,	RICH_SIMPLE_WB,	Flip_H_V,		0, 0},
	/* BGR_888 */ /* none-afbc RGB888 doesn't support rotation and flip */
	{__HW_ID(3, 0),	DRM_FORMAT_RGB888,	RICH_SIMPLE_WB,	Rot_0,			0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE_WB,	Rot_0,			0, 0},
	{__HW_ID(3, 1),	DRM_FORMAT_BGR888,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
	/* BGR 16bpp */
	{__HW_ID(4, 0),	DRM_FORMAT_RGBA5551,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 1),	DRM_FORMAT_ABGR1555,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
	{__HW_ID(4, 2),	DRM_FORMAT_RGB565,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Flip_H_V,		0, 0},
	{__HW_ID(4, 3),	DRM_FORMAT_BGR565,	RICH_SIMPLE,	Rot_ALL_H_V,	LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
	{__HW_ID(4, 4),	DRM_FORMAT_R8,		SIMPLE,		Rot_0,			0, 0},
	/* YUV 444/422/420 8bit  */
	{__HW_ID(5, 1),	DRM_FORMAT_YUYV,	RICH,		Rot_ALL_H_V,	LYT_NM,	AFB_TH}, /* afbc */
	{__HW_ID(5, 2),	DRM_FORMAT_YUYV,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 3),	DRM_FORMAT_UYVY,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_NV12,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(5, 6),	DRM_FORMAT_YUV420_8BIT,	RICH,		Rot_ALL_H_V,	LYT_NM,	AFB_TH}, /* afbc */
	{__HW_ID(5, 7),	DRM_FORMAT_YUV420,	RICH,		Flip_H_V,		0, 0},
	/* YUV 10bit*/
	{__HW_ID(6, 6),	DRM_FORMAT_X0L2,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_P010,	RICH,		Flip_H_V,		0, 0},
	{__HW_ID(6, 7),	DRM_FORMAT_YUV420_10BIT, RICH,		Rot_ALL_H_V,	LYT_NM,	AFB_TH},
};
532
533 static bool d71_format_mod_supported(const struct komeda_format_caps *caps,
534                                      u32 layer_type, u64 modifier, u32 rot)
535 {
536         uint64_t layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
537
538         if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) &&
539             drm_rotation_90_or_270(rot)) {
540                 DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n");
541                 return false;
542         }
543
544         return true;
545 }
546
547 static void d71_init_fmt_tbl(struct komeda_dev *mdev)
548 {
549         struct komeda_format_caps_table *table = &mdev->fmt_tbl;
550
551         table->format_caps = d71_format_caps_table;
552         table->format_mod_supported = d71_format_mod_supported;
553         table->n_formats = ARRAY_SIZE(d71_format_caps_table);
554 }
555
556 static int d71_connect_iommu(struct komeda_dev *mdev)
557 {
558         struct d71_dev *d71 = mdev->chip_data;
559         u32 __iomem *reg = d71->gcu_addr;
560         u32 check_bits = (d71->num_pipelines == 2) ?
561                          GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
562         int i, ret;
563
564         if (!d71->integrates_tbu)
565                 return -1;
566
567         malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE);
568
569         ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)),
570                         100, 1000, 1000);
571         if (ret < 0) {
572                 DRM_ERROR("timed out connecting to TCU!\n");
573                 malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
574                 return ret;
575         }
576
577         for (i = 0; i < d71->num_pipelines; i++)
578                 malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL,
579                                     LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN);
580         return 0;
581 }
582
583 static int d71_disconnect_iommu(struct komeda_dev *mdev)
584 {
585         struct d71_dev *d71 = mdev->chip_data;
586         u32 __iomem *reg = d71->gcu_addr;
587         u32 check_bits = (d71->num_pipelines == 2) ?
588                          GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
589         int ret;
590
591         malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE);
592
593         ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0),
594                         100, 1000, 1000);
595         if (ret < 0) {
596                 DRM_ERROR("timed out disconnecting from TCU!\n");
597                 malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
598         }
599
600         return ret;
601 }
602
/* D71 implementation of the komeda device operations. */
static const struct komeda_dev_funcs d71_chip_funcs = {
	.init_format_table	= d71_init_fmt_tbl,
	.enum_resources		= d71_enum_resources,
	.cleanup		= d71_cleanup,
	.irq_handler		= d71_irq_handler,
	.enable_irq		= d71_enable_irq,
	.disable_irq		= d71_disable_irq,
	.on_off_vblank		= d71_on_off_vblank,
	.change_opmode		= d71_change_opmode,
	.flush			= d71_flush,
	.connect_iommu		= d71_connect_iommu,
	.disconnect_iommu	= d71_disconnect_iommu,
	.dump_register		= d71_dump,
};
617
618 const struct komeda_dev_funcs *
619 d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
620 {
621         const struct komeda_dev_funcs *funcs;
622         u32 product_id;
623
624         chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
625
626         product_id = MALIDP_CORE_ID_PRODUCT_ID(chip->core_id);
627
628         switch (product_id) {
629         case MALIDP_D71_PRODUCT_ID:
630         case MALIDP_D32_PRODUCT_ID:
631                 funcs = &d71_chip_funcs;
632                 break;
633         default:
634                 DRM_ERROR("Unsupported product: 0x%x\n", product_id);
635                 return NULL;
636         }
637
638         chip->arch_id   = malidp_read32(reg_base, GLB_ARCH_ID);
639         chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
640         chip->bus_width = D71_BUS_WIDTH_16_BYTES;
641
642         return funcs;
643 }