/*
 * linux/drivers/video/omap2/dss/dsi.c
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define DSS_SUBSYS_NAME "DSI"

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>

#include <video/omapdss.h>
#include <video/mipi_display.h>

#include "dss.h"
#include "dss_features.h"

#define DSI_CATCH_MISSING_TE
struct dsi_reg { u16 idx; };

#define DSI_REG(idx)		((const struct dsi_reg) { idx })
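
/*
 * Note: wrapping the offset in a one-member struct makes the register
 * accessors type-safe. A bare u16 offset would let any integer through,
 * while this wrapper rejects it at compile time, e.g.:
 *
 *	dsi_read_reg(dsidev, 0x0040);		// does not compile
 *	dsi_read_reg(dsidev, DSI_CTRL);		// compiles
 */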
#define DSI_SZ_REGS		SZ_1K
/* DSI Protocol Engine */
#define DSI_REVISION			DSI_REG(0x0000)
#define DSI_SYSCONFIG			DSI_REG(0x0010)
#define DSI_SYSSTATUS			DSI_REG(0x0014)
#define DSI_IRQSTATUS			DSI_REG(0x0018)
#define DSI_IRQENABLE			DSI_REG(0x001C)
#define DSI_CTRL			DSI_REG(0x0040)
#define DSI_GNQ				DSI_REG(0x0044)
#define DSI_COMPLEXIO_CFG1		DSI_REG(0x0048)
#define DSI_COMPLEXIO_IRQ_STATUS	DSI_REG(0x004C)
#define DSI_COMPLEXIO_IRQ_ENABLE	DSI_REG(0x0050)
#define DSI_CLK_CTRL			DSI_REG(0x0054)
#define DSI_TIMING1			DSI_REG(0x0058)
#define DSI_TIMING2			DSI_REG(0x005C)
#define DSI_VM_TIMING1			DSI_REG(0x0060)
#define DSI_VM_TIMING2			DSI_REG(0x0064)
#define DSI_VM_TIMING3			DSI_REG(0x0068)
#define DSI_CLK_TIMING			DSI_REG(0x006C)
#define DSI_TX_FIFO_VC_SIZE		DSI_REG(0x0070)
#define DSI_RX_FIFO_VC_SIZE		DSI_REG(0x0074)
#define DSI_COMPLEXIO_CFG2		DSI_REG(0x0078)
#define DSI_RX_FIFO_VC_FULLNESS		DSI_REG(0x007C)
#define DSI_VM_TIMING4			DSI_REG(0x0080)
#define DSI_TX_FIFO_VC_EMPTINESS	DSI_REG(0x0084)
#define DSI_VM_TIMING5			DSI_REG(0x0088)
#define DSI_VM_TIMING6			DSI_REG(0x008C)
#define DSI_VM_TIMING7			DSI_REG(0x0090)
#define DSI_STOPCLK_TIMING		DSI_REG(0x0094)
#define DSI_VC_CTRL(n)			DSI_REG(0x0100 + (n * 0x20))
#define DSI_VC_TE(n)			DSI_REG(0x0104 + (n * 0x20))
#define DSI_VC_LONG_PACKET_HEADER(n)	DSI_REG(0x0108 + (n * 0x20))
#define DSI_VC_LONG_PACKET_PAYLOAD(n)	DSI_REG(0x010C + (n * 0x20))
#define DSI_VC_SHORT_PACKET_HEADER(n)	DSI_REG(0x0110 + (n * 0x20))
#define DSI_VC_IRQSTATUS(n)		DSI_REG(0x0118 + (n * 0x20))
#define DSI_VC_IRQENABLE(n)		DSI_REG(0x011C + (n * 0x20))

/* DSIPHY_SCP */

#define DSI_DSIPHY_CFG0			DSI_REG(0x200 + 0x0000)
#define DSI_DSIPHY_CFG1			DSI_REG(0x200 + 0x0004)
#define DSI_DSIPHY_CFG2			DSI_REG(0x200 + 0x0008)
#define DSI_DSIPHY_CFG5			DSI_REG(0x200 + 0x0014)
#define DSI_DSIPHY_CFG10		DSI_REG(0x200 + 0x0028)

/* DSI_PLL_CTRL_SCP */

#define DSI_PLL_CONTROL			DSI_REG(0x300 + 0x0000)
#define DSI_PLL_STATUS			DSI_REG(0x300 + 0x0004)
#define DSI_PLL_GO			DSI_REG(0x300 + 0x0008)
#define DSI_PLL_CONFIGURATION1		DSI_REG(0x300 + 0x000C)
#define DSI_PLL_CONFIGURATION2		DSI_REG(0x300 + 0x0010)
#define REG_GET(dsidev, idx, start, end) \
	FLD_GET(dsi_read_reg(dsidev, idx), start, end)

#define REG_FLD_MOD(dsidev, idx, val, start, end) \
	dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
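
/*
 * Usage sketch (values illustrative): set the IF_EN bit (bit 0) of
 * DSI_CTRL and read it back. FLD_GET()/FLD_MOD() are the DSS helpers
 * that extract/insert the inclusive [start:end] bit range of a value.
 *
 *	REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
 *	if (REG_GET(dsidev, DSI_CTRL, 0, 0) != 1)
 *		DSSERR("IF_EN did not stick\n");
 */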
/* Global interrupts */
#define DSI_IRQ_VC0		(1 << 0)
#define DSI_IRQ_VC1		(1 << 1)
#define DSI_IRQ_VC2		(1 << 2)
#define DSI_IRQ_VC3		(1 << 3)
#define DSI_IRQ_WAKEUP		(1 << 4)
#define DSI_IRQ_RESYNC		(1 << 5)
#define DSI_IRQ_PLL_LOCK	(1 << 7)
#define DSI_IRQ_PLL_UNLOCK	(1 << 8)
#define DSI_IRQ_PLL_RECALL	(1 << 9)
#define DSI_IRQ_COMPLEXIO_ERR	(1 << 10)
#define DSI_IRQ_HS_TX_TIMEOUT	(1 << 14)
#define DSI_IRQ_LP_RX_TIMEOUT	(1 << 15)
#define DSI_IRQ_TE_TRIGGER	(1 << 16)
#define DSI_IRQ_ACK_TRIGGER	(1 << 17)
#define DSI_IRQ_SYNC_LOST	(1 << 18)
#define DSI_IRQ_LDO_POWER_GOOD	(1 << 19)
#define DSI_IRQ_TA_TIMEOUT	(1 << 20)
#define DSI_IRQ_ERROR_MASK \
	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
	DSI_IRQ_TA_TIMEOUT)
#define DSI_IRQ_CHANNEL_MASK	0xf
/* Virtual channel interrupts */
#define DSI_VC_IRQ_CS			(1 << 0)
#define DSI_VC_IRQ_ECC_CORR		(1 << 1)
#define DSI_VC_IRQ_PACKET_SENT		(1 << 2)
#define DSI_VC_IRQ_FIFO_TX_OVF		(1 << 3)
#define DSI_VC_IRQ_FIFO_RX_OVF		(1 << 4)
#define DSI_VC_IRQ_BTA			(1 << 5)
#define DSI_VC_IRQ_ECC_NO_CORR		(1 << 6)
#define DSI_VC_IRQ_FIFO_TX_UDF		(1 << 7)
#define DSI_VC_IRQ_PP_BUSY_CHANGE	(1 << 8)
#define DSI_VC_IRQ_ERROR_MASK \
	(DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
	DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
	DSI_VC_IRQ_FIFO_TX_UDF)
/* ComplexIO interrupts */
#define DSI_CIO_IRQ_ERRSYNCESC1		(1 << 0)
#define DSI_CIO_IRQ_ERRSYNCESC2		(1 << 1)
#define DSI_CIO_IRQ_ERRSYNCESC3		(1 << 2)
#define DSI_CIO_IRQ_ERRSYNCESC4		(1 << 3)
#define DSI_CIO_IRQ_ERRSYNCESC5		(1 << 4)
#define DSI_CIO_IRQ_ERRESC1		(1 << 5)
#define DSI_CIO_IRQ_ERRESC2		(1 << 6)
#define DSI_CIO_IRQ_ERRESC3		(1 << 7)
#define DSI_CIO_IRQ_ERRESC4		(1 << 8)
#define DSI_CIO_IRQ_ERRESC5		(1 << 9)
#define DSI_CIO_IRQ_ERRCONTROL1		(1 << 10)
#define DSI_CIO_IRQ_ERRCONTROL2		(1 << 11)
#define DSI_CIO_IRQ_ERRCONTROL3		(1 << 12)
#define DSI_CIO_IRQ_ERRCONTROL4		(1 << 13)
#define DSI_CIO_IRQ_ERRCONTROL5		(1 << 14)
#define DSI_CIO_IRQ_STATEULPS1		(1 << 15)
#define DSI_CIO_IRQ_STATEULPS2		(1 << 16)
#define DSI_CIO_IRQ_STATEULPS3		(1 << 17)
#define DSI_CIO_IRQ_STATEULPS4		(1 << 18)
#define DSI_CIO_IRQ_STATEULPS5		(1 << 19)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1	(1 << 20)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1	(1 << 21)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2	(1 << 22)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2	(1 << 23)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3	(1 << 24)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3	(1 << 25)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4	(1 << 26)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4	(1 << 27)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5	(1 << 28)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5	(1 << 29)
#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0	(1 << 30)
#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1	(1 << 31)
#define DSI_CIO_IRQ_ERROR_MASK \
	(DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
	 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
	 DSI_CIO_IRQ_ERRSYNCESC5 | \
	 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
	 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
	 DSI_CIO_IRQ_ERRESC5 | \
	 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
	 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
	 DSI_CIO_IRQ_ERRCONTROL5 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);

static int dsi_display_init_dispc(struct platform_device *dsidev,
	struct omap_overlay_manager *mgr);
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
	struct omap_overlay_manager *mgr);

#define DSI_MAX_NR_ISRS		2
#define DSI_MAX_NR_LANES	5
enum dsi_lane_function {
	DSI_LANE_UNUSED	= 0,
	DSI_LANE_CLK,
	DSI_LANE_DATA1,
	DSI_LANE_DATA2,
	DSI_LANE_DATA3,
	DSI_LANE_DATA4,
};

struct dsi_lane_config {
	enum dsi_lane_function function;
	u8 polarity;
};

struct dsi_isr_data {
	omap_dsi_isr_t	isr;
	void		*arg;
	u32		mask;
};

enum fifo_size {
	DSI_FIFO_SIZE_0		= 0,
	DSI_FIFO_SIZE_32	= 1,
	DSI_FIFO_SIZE_64	= 2,
	DSI_FIFO_SIZE_96	= 3,
	DSI_FIFO_SIZE_128	= 4,
};

enum dsi_vc_source {
	DSI_VC_SOURCE_L4 = 0,
	DSI_VC_SOURCE_VP,
};

struct dsi_irq_stats {
	unsigned long last_reset;
	unsigned irq_count;
	unsigned dsi_irqs[32];
	unsigned vc_irqs[4][32];
	unsigned cio_irqs[32];
};

struct dsi_isr_tables {
	struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
};
struct dsi_data {
	struct platform_device *pdev;
	void __iomem	*base;

	int module_id;

	struct clk *dss_clk;
	struct clk *sys_clk;

	struct dispc_clock_info user_dispc_cinfo;
	struct dsi_clock_info user_dsi_cinfo;

	enum omap_dss_clk_source user_dispc_fclk_src;
	enum omap_dss_clk_source user_lcd_clk_src;
	enum omap_dss_clk_source user_dsi_fclk_src;

	struct dsi_clock_info current_cinfo;

	bool vdds_dsi_enabled;
	struct regulator *vdds_dsi_reg;

	struct {
		enum dsi_vc_source source;
		struct omap_dss_device *dssdev;
		enum fifo_size fifo_size;
		int vc_id;
	} vc[4];

	struct mutex lock;
	struct semaphore bus_lock;

	unsigned pll_locked;

	spinlock_t irq_lock;
	struct dsi_isr_tables isr_tables;
	/* space for a copy used by the interrupt handler */
	struct dsi_isr_tables isr_tables_copy;

	int update_channel;
	unsigned update_bytes;

	bool te_enabled;
	bool ulps_enabled;

	void (*framedone_callback)(int, void *);
	void *framedone_data;

	struct delayed_work framedone_timeout_work;

#ifdef DSI_CATCH_MISSING_TE
	struct timer_list te_timer;
#endif

	unsigned long cache_req_pck;
	unsigned long cache_clk_freq;
	struct dsi_clock_info cache_cinfo;

	u32		errors;
	spinlock_t	errors_lock;

	ktime_t perf_setup_time;
	ktime_t perf_start_time;

	int debug_read;
	int debug_write;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spinlock_t irq_stats_lock;
	struct dsi_irq_stats irq_stats;
#endif

	/* DSI PLL Parameter Ranges */
	unsigned long regm_max, regn_max;
	unsigned long regm_dispc_max, regm_dsi_max;
	unsigned long fint_min, fint_max;
	unsigned long lpdiv_max;

	unsigned num_lanes_supported;
	unsigned line_buffer_size;

	struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
	unsigned num_lanes_used;

	unsigned scp_clk_refcount;

	struct dss_lcd_mgr_config mgr_config;
	struct omap_video_timings timings;
	enum omap_dss_dsi_pixel_format pix_fmt;
	enum omap_dss_dsi_mode mode;
	struct omap_dss_dsi_videomode_timings vm_timings;

	struct omap_dss_output output;
};
struct dsi_packet_sent_handler_data {
	struct platform_device *dsidev;
	struct completion *completion;
};

#ifdef DEBUG
static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
{
	return dev_get_drvdata(&dsidev->dev);
}

static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
{
	return dssdev->output->pdev;
}
struct platform_device *dsi_get_dsidev_from_id(int module)
{
	struct omap_dss_output *out;
	enum omap_dss_output_id	id;

	switch (module) {
	case 0:
		id = OMAP_DSS_OUTPUT_DSI1;
		break;
	case 1:
		id = OMAP_DSS_OUTPUT_DSI2;
		break;
	default:
		return NULL;
	}

	out = omap_dss_get_output(id);

	return out ? out->pdev : NULL;
}
static inline void dsi_write_reg(struct platform_device *dsidev,
		const struct dsi_reg idx, u32 val)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	__raw_writel(val, dsi->base + idx.idx);
}

static inline u32 dsi_read_reg(struct platform_device *dsidev,
		const struct dsi_reg idx)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return __raw_readl(dsi->base + idx.idx);
}
void dsi_bus_lock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	down(&dsi->bus_lock);
}
EXPORT_SYMBOL(dsi_bus_lock);

void dsi_bus_unlock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	up(&dsi->bus_lock);
}
EXPORT_SYMBOL(dsi_bus_unlock);

static bool dsi_bus_is_locked(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->bus_lock.count == 0;
}
static void dsi_completion_handler(void *data, u32 mask)
{
	complete((struct completion *)data);
}
static inline int wait_for_bit_change(struct platform_device *dsidev,
		const struct dsi_reg idx, int bitnum, int value)
{
	unsigned long timeout;
	ktime_t wait;
	int t;

	/* first busyloop to see if the bit changes right away */
	t = 100;
	while (t-- > 0) {
		if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
			return value;
	}

	/* then loop for 500ms, sleeping for 1ms in between */
	timeout = jiffies + msecs_to_jiffies(500);
	while (time_before(jiffies, timeout)) {
		if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
			return value;

		wait = ns_to_ktime(1000 * 1000);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
	}

	return !value;
}
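
/*
 * Callers compare the return value against the expected bit value; on
 * timeout the function returns the complement of 'value', e.g.:
 *
 *	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
 *		DSSERR("cannot lock PLL\n");
 */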
u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
{
	switch (fmt) {
	case OMAP_DSS_DSI_FMT_RGB888:
	case OMAP_DSS_DSI_FMT_RGB666:
		return 24;
	case OMAP_DSS_DSI_FMT_RGB666_PACKED:
		return 18;
	case OMAP_DSS_DSI_FMT_RGB565:
		return 16;
	default:
		BUG();
		return 0;
	}
}
#ifdef DEBUG
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_setup_time = ktime_get();
}

static void dsi_perf_mark_start(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_start_time = ktime_get();
}

static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	ktime_t t, setup_time, trans_time;
	u32 total_bytes;
	u32 setup_us, trans_us, total_us;

	if (!dsi_perf)
		return;

	t = ktime_get();

	setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
	setup_us = (u32)ktime_to_us(setup_time);
	if (setup_us == 0)
		setup_us = 1;

	trans_time = ktime_sub(t, dsi->perf_start_time);
	trans_us = (u32)ktime_to_us(trans_time);
	if (trans_us == 0)
		trans_us = 1;

	total_us = setup_us + trans_us;

	total_bytes = dsi->update_bytes;

	printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
			"%u bytes, %u kbytes/sec\n",
			name,
			setup_us,
			trans_us,
			total_us,
			1000*1000 / total_us,
			total_bytes,
			total_bytes * 1000 / total_us);
}
#else
static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
{
}

static inline void dsi_perf_mark_start(struct platform_device *dsidev)
{
}

static inline void dsi_perf_show(struct platform_device *dsidev,
		const char *name)
{
}
#endif
static int verbose_irq;

static void print_irq_status(u32 status)
{
	if (status == 0)
		return;

	if (!verbose_irq && (status & ~DSI_IRQ_CHANNEL_MASK) == 0)
		return;

#define PIS(x) (status & DSI_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI IRQ: 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		status,
		verbose_irq ? PIS(VC0) : "",
		verbose_irq ? PIS(VC1) : "",
		verbose_irq ? PIS(VC2) : "",
		verbose_irq ? PIS(VC3) : "",
		PIS(WAKEUP),
		PIS(RESYNC),
		PIS(PLL_LOCK),
		PIS(PLL_UNLOCK),
		PIS(PLL_RECALL),
		PIS(COMPLEXIO_ERR),
		PIS(HS_TX_TIMEOUT),
		PIS(LP_RX_TIMEOUT),
		PIS(TE_TRIGGER),
		PIS(ACK_TRIGGER),
		PIS(SYNC_LOST),
		PIS(LDO_POWER_GOOD),
		PIS(TA_TIMEOUT));
#undef PIS
}
static void print_irq_status_vc(int channel, u32 status)
{
	if (status == 0)
		return;

	if (!verbose_irq && (status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
		return;

#define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n",
		channel,
		status,
		PIS(CS),
		PIS(ECC_CORR),
		verbose_irq ? PIS(PACKET_SENT) : "",
		PIS(FIFO_TX_OVF),
		PIS(FIFO_RX_OVF),
		PIS(BTA),
		PIS(ECC_NO_CORR),
		PIS(FIFO_TX_UDF),
		PIS(PP_BUSY_CHANGE));
#undef PIS
}
static void print_irq_status_cio(u32 status)
{
	if (status == 0)
		return;

#define PIS(x) (status & DSI_CIO_IRQ_##x) ? (#x " ") : ""

	pr_debug("DSI CIO IRQ 0x%x: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		status,
		PIS(ERRSYNCESC1),
		PIS(ERRSYNCESC2),
		PIS(ERRSYNCESC3),
		PIS(ERRESC1),
		PIS(ERRESC2),
		PIS(ERRESC3),
		PIS(ERRCONTROL1),
		PIS(ERRCONTROL2),
		PIS(ERRCONTROL3),
		PIS(STATEULPS1),
		PIS(STATEULPS2),
		PIS(STATEULPS3),
		PIS(ERRCONTENTIONLP0_1),
		PIS(ERRCONTENTIONLP1_1),
		PIS(ERRCONTENTIONLP0_2),
		PIS(ERRCONTENTIONLP1_2),
		PIS(ERRCONTENTIONLP0_3),
		PIS(ERRCONTENTIONLP1_3),
		PIS(ULPSACTIVENOT_ALL0),
		PIS(ULPSACTIVENOT_ALL1));
#undef PIS
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	spin_lock(&dsi->irq_stats_lock);

	dsi->irq_stats.irq_count++;
	dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);

	for (i = 0; i < 4; ++i)
		dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);

	dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);

	spin_unlock(&dsi->irq_stats_lock);
}
#else
#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
#endif
static int debug_irq;

static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	if (irqstatus & DSI_IRQ_ERROR_MASK) {
		DSSERR("DSI error, irqstatus %x\n", irqstatus);
		print_irq_status(irqstatus);
		spin_lock(&dsi->errors_lock);
		dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
		spin_unlock(&dsi->errors_lock);
	} else if (debug_irq) {
		print_irq_status(irqstatus);
	}

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
			DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
					i, vcstatus[i]);
			print_irq_status_vc(i, vcstatus[i]);
		} else if (debug_irq) {
			print_irq_status_vc(i, vcstatus[i]);
		}
	}

	if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
		DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
		print_irq_status_cio(ciostatus);
	} else if (debug_irq) {
		print_irq_status_cio(ciostatus);
	}
}
static void dsi_call_isrs(struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 irqstatus)
{
	struct dsi_isr_data *isr_data;
	int i;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];
		if (isr_data->isr && isr_data->mask & irqstatus)
			isr_data->isr(isr_data->arg, irqstatus);
	}
}

static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
		u32 irqstatus, u32 *vcstatus, u32 ciostatus)
{
	int i;

	dsi_call_isrs(isr_tables->isr_table,
			ARRAY_SIZE(isr_tables->isr_table),
			irqstatus);

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] == 0)
			continue;
		dsi_call_isrs(isr_tables->isr_table_vc[i],
				ARRAY_SIZE(isr_tables->isr_table_vc[i]),
				vcstatus[i]);
	}

	if (ciostatus != 0)
		dsi_call_isrs(isr_tables->isr_table_cio,
				ARRAY_SIZE(isr_tables->isr_table_cio),
				ciostatus);
}
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
	struct platform_device *dsidev;
	struct dsi_data *dsi;
	u32 irqstatus, vcstatus[4], ciostatus;
	int i;

	dsidev = (struct platform_device *) arg;
	dsi = dsi_get_dsidrv_data(dsidev);

	spin_lock(&dsi->irq_lock);

	irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);

	/* IRQ is not for us */
	if (!irqstatus) {
		spin_unlock(&dsi->irq_lock);
		return IRQ_NONE;
	}

	dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
	/* flush posted write */
	dsi_read_reg(dsidev, DSI_IRQSTATUS);

	for (i = 0; i < 4; ++i) {
		if ((irqstatus & (1 << i)) == 0) {
			vcstatus[i] = 0;
			continue;
		}

		vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));

		dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
	}

	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
		ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);

		dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
	} else {
		ciostatus = 0;
	}

#ifdef DSI_CATCH_MISSING_TE
	if (irqstatus & DSI_IRQ_TE_TRIGGER)
		del_timer(&dsi->te_timer);
#endif

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
		sizeof(dsi->isr_tables));

	spin_unlock(&dsi->irq_lock);

	dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);

	dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);

	dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);

	return IRQ_HANDLED;
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
		struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 default_mask,
		const struct dsi_reg enable_reg,
		const struct dsi_reg status_reg)
{
	struct dsi_isr_data *isr_data;
	u32 mask;
	u32 old_mask;
	int i;

	mask = default_mask;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dsi_read_reg(dsidev, enable_reg);
	/* clear the irqstatus for newly enabled irqs */
	dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
	dsi_write_reg(dsidev, enable_reg, mask);

	/* flush posted writes */
	dsi_read_reg(dsidev, enable_reg);
	dsi_read_reg(dsidev, status_reg);
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
	mask |= DSI_IRQ_TE_TRIGGER;
#endif
	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
			DSI_IRQENABLE, DSI_IRQSTATUS);
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
			DSI_VC_IRQ_ERROR_MASK,
			DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
			DSI_CIO_IRQ_ERROR_MASK,
			DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}

static void _dsi_initialize_irq(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int vc;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));

	_omap_dsi_set_irqs(dsidev);
	for (vc = 0; vc < 4; ++vc)
		_omap_dsi_set_irqs_vc(dsidev, vc);
	_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);
}
static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
		struct dsi_isr_data *isr_array, unsigned isr_array_size)
{
	struct dsi_isr_data *isr_data;
	int free_idx;
	int i;

	BUG_ON(isr == NULL);

	/* check for duplicate entry and find a free slot */
	free_idx = -1;
	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == isr && isr_data->arg == arg &&
				isr_data->mask == mask) {
			return -EINVAL;
		}

		if (isr_data->isr == NULL && free_idx == -1)
			free_idx = i;
	}

	if (free_idx == -1)
		return -EBUSY;

	isr_data = &isr_array[free_idx];
	isr_data->isr = isr;
	isr_data->arg = arg;
	isr_data->mask = mask;

	return 0;
}

static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
		struct dsi_isr_data *isr_array, unsigned isr_array_size)
{
	struct dsi_isr_data *isr_data;
	int i;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];
		if (isr_data->isr != isr || isr_data->arg != arg ||
				isr_data->mask != mask)
			continue;

		isr_data->isr = NULL;
		isr_data->arg = NULL;
		isr_data->mask = 0;

		return 0;
	}

	return -EINVAL;
}
static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
		void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table));

	if (r == 0)
		_omap_dsi_set_irqs(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table));

	if (r == 0)
		_omap_dsi_set_irqs(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask,
			dsi->isr_tables.isr_table_vc[channel],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));

	if (r == 0)
		_omap_dsi_set_irqs_vc(dsidev, channel);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask,
			dsi->isr_tables.isr_table_vc[channel],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));

	if (r == 0)
		_omap_dsi_set_irqs_vc(dsidev, channel);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_register_isr_cio(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));

	if (r == 0)
		_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr_cio(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));

	if (r == 0)
		_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}
static u32 dsi_get_errors(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	u32 e;

	spin_lock_irqsave(&dsi->errors_lock, flags);
	e = dsi->errors;
	dsi->errors = 0;
	spin_unlock_irqrestore(&dsi->errors_lock, flags);
	return e;
}
int dsi_runtime_get(struct platform_device *dsidev)
{
	int r;
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("dsi_runtime_get\n");

	r = pm_runtime_get_sync(&dsi->pdev->dev);
	WARN_ON(r < 0);
	return r < 0 ? r : 0;
}

void dsi_runtime_put(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	DSSDBG("dsi_runtime_put\n");

	r = pm_runtime_put_sync(&dsi->pdev->dev);
	WARN_ON(r < 0 && r != -ENOSYS);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
		bool enable)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (enable)
		clk_prepare_enable(dsi->sys_clk);
	else
		clk_disable_unprepare(dsi->sys_clk);

	if (enable && dsi->pll_locked) {
		if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
			DSSERR("cannot lock PLL when enabling clocks\n");
	}
}
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
	u32 l;
	int b0, b1, b2;

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

	if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
		b0 = 28;
		b1 = 27;
		b2 = 26;
	} else {
		b0 = 24;
		b1 = 25;
		b2 = 26;
	}

#define DSI_FLD_GET(fld, start, end)\
	FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)

	pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
		DSI_FLD_GET(PLL_STATUS, 0, 0),
		DSI_FLD_GET(COMPLEXIO_CFG1, 29, 29),
		DSI_FLD_GET(DSIPHY_CFG5, b0, b0),
		DSI_FLD_GET(DSIPHY_CFG5, b1, b1),
		DSI_FLD_GET(DSIPHY_CFG5, b2, b2),
		DSI_FLD_GET(DSIPHY_CFG5, 29, 29),
		DSI_FLD_GET(DSIPHY_CFG5, 30, 30),
		DSI_FLD_GET(DSIPHY_CFG5, 31, 31));

#undef DSI_FLD_GET
}
static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
{
	DSSDBG("dsi_if_enable(%d)\n", enable);

	enable = enable ? 1 : 0;
	REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */

	if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
		DSSERR("Failed to set dsi_if_enable to %d\n", enable);
		return -EIO;
	}

	return 0;
}
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
}

static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
}

static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.clkin4ddr / 16;
}
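
/*
 * Illustrative numbers: with clkin4ddr = 864 MHz the HS byte clock is
 * 864 / 16 = 54 MHz. Each lane carries 2 bits per clkin4ddr cycle, i.e.
 * a 432 Mbps lane rate, and 432 / 8 bits gives the same 54 MByte/s.
 */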
static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
{
	unsigned long r;
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
		/* DSI FCLK source is DSS_CLK_FCK */
		r = clk_get_rate(dsi->dss_clk);
	} else {
		/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
		r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
	}

	return r;
}
static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long dsi_fclk;
	unsigned lp_clk_div;
	unsigned long lp_clk;

	lp_clk_div = dsi->user_dsi_cinfo.lp_clk_div;

	if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
		return -EINVAL;

	dsi_fclk = dsi_fclk_rate(dsidev);

	lp_clk = dsi_fclk / 2 / lp_clk_div;

	DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
	dsi->current_cinfo.lp_clk = lp_clk;
	dsi->current_cinfo.lp_clk_div = lp_clk_div;

	/* LP_CLK_DIVISOR */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);

	/* LP_RX_SYNCHRO_ENABLE */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);

	return 0;
}
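
/*
 * Worked example (illustrative values): dsi_fclk = 173 MHz with
 * lp_clk_div = 10 gives lp_clk = 173000000 / 2 / 10 = 8.65 MHz, below
 * the 10 MHz ceiling usually targeted for D-PHY low-power transmission.
 */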
static void dsi_enable_scp_clk(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->scp_clk_refcount++ == 0)
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}

static void dsi_disable_scp_clk(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	WARN_ON(dsi->scp_clk_refcount == 0);
	if (--dsi->scp_clk_refcount == 0)
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}
enum dsi_pll_power_state {
	DSI_PLL_POWER_OFF	= 0x0,
	DSI_PLL_POWER_ON_HSCLK	= 0x1,
	DSI_PLL_POWER_ON_ALL	= 0x2,
	DSI_PLL_POWER_ON_DIV	= 0x3,
};

static int dsi_pll_power(struct platform_device *dsidev,
		enum dsi_pll_power_state state)
{
	int t = 0;

	/* DSI-PLL power command 0x3 is not working */
	if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
			state == DSI_PLL_POWER_ON_DIV)
		state = DSI_PLL_POWER_ON_ALL;

	/* PLL_PWR_CMD */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);

	/* PLL_PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
		if (++t > 1000) {
			DSSERR("Failed to set DSI PLL power mode to %d\n",
					state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}
/* calculate clock rates using dividers in cinfo */
static int dsi_calc_clock_rates(struct platform_device *dsidev,
		struct dsi_clock_info *cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
		return -EINVAL;

	if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
		return -EINVAL;

	if (cinfo->regm_dispc > dsi->regm_dispc_max)
		return -EINVAL;

	if (cinfo->regm_dsi > dsi->regm_dsi_max)
		return -EINVAL;

	cinfo->clkin = clk_get_rate(dsi->sys_clk);
	cinfo->fint = cinfo->clkin / cinfo->regn;

	if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
		return -EINVAL;

	cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;

	if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
		return -EINVAL;

	if (cinfo->regm_dispc > 0)
		cinfo->dsi_pll_hsdiv_dispc_clk =
			cinfo->clkin4ddr / cinfo->regm_dispc;
	else
		cinfo->dsi_pll_hsdiv_dispc_clk = 0;

	if (cinfo->regm_dsi > 0)
		cinfo->dsi_pll_hsdiv_dsi_clk =
			cinfo->clkin4ddr / cinfo->regm_dsi;
	else
		cinfo->dsi_pll_hsdiv_dsi_clk = 0;

	return 0;
}
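
/*
 * Worked example (illustrative, assuming a 38.4 MHz sys_clk): regn = 20
 * gives fint = 38400000 / 20 = 1.92 MHz, inside the 0.75-2.1 MHz window
 * noted below; regm = 225 then gives clkin4ddr = 2 * 225 * 1920000 =
 * 864 MHz, i.e. 432 Mbps per data lane, and regm_dispc = 5 yields a
 * 172.8 MHz dsi_pll_hsdiv_dispc_clk.
 */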
int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
		unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
		struct dispc_clock_info *dispc_cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info cur, best;
	struct dispc_clock_info best_dispc;
	int min_fck_per_pck;
	int match = 0;
	unsigned long dss_sys_clk, max_dss_fck;

	dss_sys_clk = clk_get_rate(dsi->sys_clk);

	max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);

	if (req_pck == dsi->cache_req_pck &&
			dsi->cache_cinfo.clkin == dss_sys_clk) {
		DSSDBG("DSI clock info found from cache\n");
		*dsi_cinfo = dsi->cache_cinfo;
		dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk,
			dispc_cinfo);
		return 0;
	}

	min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;

	if (min_fck_per_pck &&
		req_pck * min_fck_per_pck > max_dss_fck) {
		DSSERR("Requested pixel clock not possible with the current "
				"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
				"the constraint off.\n");
		min_fck_per_pck = 0;
	}

	DSSDBG("dsi_pll_calc\n");

retry:
	memset(&best, 0, sizeof(best));
	memset(&best_dispc, 0, sizeof(best_dispc));

	memset(&cur, 0, sizeof(cur));
	cur.clkin = dss_sys_clk;

	/* 0.75MHz < Fint = clkin / regn < 2.1MHz */
	/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
	for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
		cur.fint = cur.clkin / cur.regn;

		if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
			continue;

		/* DSIPHY(MHz) = (2 * regm / regn) * clkin */
		for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
			unsigned long a, b;

			a = 2 * cur.regm * (cur.clkin/1000);
			b = cur.regn;
			cur.clkin4ddr = a / b * 1000;

			if (cur.clkin4ddr > 1800 * 1000 * 1000)
				break;

			/* dsi_pll_hsdiv_dispc_clk(MHz) =
			 * DSIPHY(MHz) / regm_dispc  < 173MHz/186Mhz */
			for (cur.regm_dispc = 1; cur.regm_dispc <
					dsi->regm_dispc_max; ++cur.regm_dispc) {
				struct dispc_clock_info cur_dispc;
				cur.dsi_pll_hsdiv_dispc_clk =
					cur.clkin4ddr / cur.regm_dispc;

				if (cur.regm_dispc > 1 &&
						cur.regm_dispc % 2 != 0 &&
						req_pck >= 1000000)
					continue;

				/* this will narrow down the search a bit,
				 * but still give pixclocks below what was
				 * requested */
				if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
					break;

				if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
					continue;

				if (min_fck_per_pck &&
					cur.dsi_pll_hsdiv_dispc_clk <
						req_pck * min_fck_per_pck)
					continue;

				match = 1;

				dispc_find_clk_divs(req_pck,
						cur.dsi_pll_hsdiv_dispc_clk,
						&cur_dispc);

				if (abs(cur_dispc.pck - req_pck) <
						abs(best_dispc.pck - req_pck)) {
					best = cur;
					best_dispc = cur_dispc;

					if (cur_dispc.pck == req_pck)
						goto found;
				}
			}
		}
	}
found:
	if (!match) {
		if (min_fck_per_pck) {
			DSSERR("Could not find suitable clock settings.\n"
					"Turning FCK/PCK constraint off and"
					"trying again.\n");
			min_fck_per_pck = 0;
			goto retry;
		}

		DSSERR("Could not find suitable clock settings.\n");

		return -EINVAL;
	}

	/* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
	best.regm_dsi = 0;
	best.dsi_pll_hsdiv_dsi_clk = 0;

	if (dsi_cinfo)
		*dsi_cinfo = best;
	if (dispc_cinfo)
		*dispc_cinfo = best_dispc;

	dsi->cache_req_pck = req_pck;
	dsi->cache_clk_freq = 0;
	dsi->cache_cinfo = best;

	return 0;
}
static int dsi_pll_calc_ddrfreq(struct platform_device *dsidev,
		unsigned long req_clkin4ddr, struct dsi_clock_info *cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info cur, best;

	DSSDBG("dsi_pll_calc_ddrfreq\n");

	memset(&best, 0, sizeof(best));
	memset(&cur, 0, sizeof(cur));

	cur.clkin = clk_get_rate(dsi->sys_clk);

	for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
		cur.fint = cur.clkin / cur.regn;

		if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
			continue;

		/* DSIPHY(MHz) = (2 * regm / regn) * clkin */
		for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
			unsigned long a, b;

			a = 2 * cur.regm * (cur.clkin/1000);
			b = cur.regn;
			cur.clkin4ddr = a / b * 1000;

			if (cur.clkin4ddr > 1800 * 1000 * 1000)
				break;

			if (abs(cur.clkin4ddr - req_clkin4ddr) <
					abs(best.clkin4ddr - req_clkin4ddr)) {
				best = cur;
				DSSDBG("best %ld\n", best.clkin4ddr);
			}

			if (cur.clkin4ddr == req_clkin4ddr)
				goto found;
		}
	}
found:
	if (cinfo)
		*cinfo = best;

	return 0;
}
static void dsi_pll_calc_dsi_fck(struct platform_device *dsidev,
		struct dsi_clock_info *cinfo)
{
	unsigned long max_dsi_fck;

	max_dsi_fck = dss_feat_get_param_max(FEAT_PARAM_DSI_FCK);

	cinfo->regm_dsi = DIV_ROUND_UP(cinfo->clkin4ddr, max_dsi_fck);
	cinfo->dsi_pll_hsdiv_dsi_clk = cinfo->clkin4ddr / cinfo->regm_dsi;
}
static int dsi_pll_calc_dispc_fck(struct platform_device *dsidev,
		unsigned long req_pck, struct dsi_clock_info *cinfo,
		struct dispc_clock_info *dispc_cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned regm_dispc, best_regm_dispc;
	unsigned long dispc_clk, best_dispc_clk;
	int min_fck_per_pck;
	unsigned long max_dss_fck;
	struct dispc_clock_info best_dispc;
	bool match;

	max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);

	min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;

	if (min_fck_per_pck &&
		req_pck * min_fck_per_pck > max_dss_fck) {
		DSSERR("Requested pixel clock not possible with the current "
				"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
				"the constraint off.\n");
		min_fck_per_pck = 0;
	}

retry:
	best_regm_dispc = 0;
	best_dispc_clk = 0;
	memset(&best_dispc, 0, sizeof(best_dispc));
	match = false;

	for (regm_dispc = 1; regm_dispc < dsi->regm_dispc_max; ++regm_dispc) {
		struct dispc_clock_info cur_dispc;

		dispc_clk = cinfo->clkin4ddr / regm_dispc;

		/* this will narrow down the search a bit,
		 * but still give pixclocks below what was
		 * requested */
		if (dispc_clk < req_pck)
			break;

		if (dispc_clk > max_dss_fck)
			continue;

		if (min_fck_per_pck && dispc_clk < req_pck * min_fck_per_pck)
			continue;

		match = true;

		dispc_find_clk_divs(req_pck, dispc_clk, &cur_dispc);

		if (abs(cur_dispc.pck - req_pck) <
				abs(best_dispc.pck - req_pck)) {
			best_regm_dispc = regm_dispc;
			best_dispc_clk = dispc_clk;
			best_dispc = cur_dispc;

			if (cur_dispc.pck == req_pck)
				goto found;
		}
	}

	if (!match) {
		if (min_fck_per_pck) {
			DSSERR("Could not find suitable clock settings.\n"
					"Turning FCK/PCK constraint off and"
					"trying again.\n");
			min_fck_per_pck = 0;
			goto retry;
		}

		DSSERR("Could not find suitable clock settings.\n");

		return -EINVAL;
	}
found:
	cinfo->regm_dispc = best_regm_dispc;
	cinfo->dsi_pll_hsdiv_dispc_clk = best_dispc_clk;

	*dispc_cinfo = best_dispc;

	return 0;
}
int dsi_pll_set_clock_div(struct platform_device *dsidev,
		struct dsi_clock_info *cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;
	u32 l;
	int f = 0;
	u8 regn_start, regn_end, regm_start, regm_end;
	u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end;

	DSSDBG("DSI PLL clock config starts");

	dsi->current_cinfo.clkin = cinfo->clkin;
	dsi->current_cinfo.fint = cinfo->fint;
	dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
	dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
			cinfo->dsi_pll_hsdiv_dispc_clk;
	dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk =
			cinfo->dsi_pll_hsdiv_dsi_clk;

	dsi->current_cinfo.regn = cinfo->regn;
	dsi->current_cinfo.regm = cinfo->regm;
	dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
	dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;

	DSSDBG("DSI Fint %ld\n", cinfo->fint);

	DSSDBG("clkin rate %ld\n", cinfo->clkin);

	/* DSIPHY == CLKIN4DDR */
	DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
			cinfo->regm,
			cinfo->regn,
			cinfo->clkin,
			cinfo->clkin4ddr);

	DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
			cinfo->clkin4ddr / 1000 / 1000 / 2);

	DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);

	DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
		dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
		dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
		cinfo->dsi_pll_hsdiv_dispc_clk);
	DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
		dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
		dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
		cinfo->dsi_pll_hsdiv_dsi_clk);

	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
			&regm_dispc_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
			&regm_dsi_end);

	/* DSI_PLL_AUTOMODE = manual */
	REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
	l = FLD_MOD(l, 1, 0, 0);		/* DSI_PLL_STOPMODE */
	/* DSI_PLL_REGN */
	l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
	/* DSI_PLL_REGM */
	l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
	/* DSI_CLOCK_DIV */
	l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
			regm_dispc_start, regm_dispc_end);
	/* DSIPROTO_CLOCK_DIV */
	l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
			regm_dsi_start, regm_dsi_end);
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);

	BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);

	if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
		f = cinfo->fint < 1000000 ? 0x3 :
			cinfo->fint < 1250000 ? 0x4 :
			cinfo->fint < 1500000 ? 0x5 :
			cinfo->fint < 1750000 ? 0x6 :
			0x7;

		l = FLD_MOD(l, f, 4, 1);	/* DSI_PLL_FREQSEL */
	} else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) {
		f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;

		l = FLD_MOD(l, f, 4, 1);	/* PLL_SELFREQDCO */
	}

	l = FLD_MOD(l, 1, 13, 13);		/* DSI_PLL_REFEN */
	l = FLD_MOD(l, 0, 14, 14);		/* DSIPHY_CLKINEN */
	l = FLD_MOD(l, 1, 20, 20);		/* DSI_HSDIVBYPASS */
	if (dss_has_feature(FEAT_DSI_PLL_REFSEL))
		l = FLD_MOD(l, 3, 22, 21);	/* REF_SYSCLK = sysclk */
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);

	REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0);	/* DSI_PLL_GO */

	if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
		DSSERR("dsi pll go bit not going down.\n");
		r = -EIO;
		goto err;
	}

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
		DSSERR("cannot lock PLL\n");
		r = -EIO;
		goto err;
	}

	dsi->pll_locked = 1;

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
	l = FLD_MOD(l, 0, 0, 0);	/* DSI_PLL_IDLE */
	l = FLD_MOD(l, 0, 5, 5);	/* DSI_PLL_PLLLPMODE */
	l = FLD_MOD(l, 0, 6, 6);	/* DSI_PLL_LOWCURRSTBY */
	l = FLD_MOD(l, 0, 7, 7);	/* DSI_PLL_TIGHTPHASELOCK */
	l = FLD_MOD(l, 0, 8, 8);	/* DSI_PLL_DRIFTGUARDEN */
	l = FLD_MOD(l, 0, 10, 9);	/* DSI_PLL_LOCKSEL */
	l = FLD_MOD(l, 1, 13, 13);	/* DSI_PLL_REFEN */
	l = FLD_MOD(l, 1, 14, 14);	/* DSIPHY_CLKINEN */
	l = FLD_MOD(l, 0, 15, 15);	/* DSI_BYPASSEN */
	l = FLD_MOD(l, 1, 16, 16);	/* DSS_CLOCK_EN */
	l = FLD_MOD(l, 0, 17, 17);	/* DSS_CLOCK_PWDN */
	l = FLD_MOD(l, 1, 18, 18);	/* DSI_PROTO_CLOCK_EN */
	l = FLD_MOD(l, 0, 19, 19);	/* DSI_PROTO_CLOCK_PWDN */
	l = FLD_MOD(l, 0, 20, 20);	/* DSI_HSDIVBYPASS */
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);

	DSSDBG("PLL config done\n");
err:
	return r;
}
int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
		bool enable_hsdiv)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;
	enum dsi_pll_power_state pwstate;

	DSSDBG("PLL init\n");

	/*
	 * It seems that on many OMAPs we need to enable both to have a
	 * functional HSDivider.
	 */
	enable_hsclk = enable_hsdiv = true;

	if (dsi->vdds_dsi_reg == NULL) {
		struct regulator *vdds_dsi;

		vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");

		/* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
		if (IS_ERR(vdds_dsi))
			vdds_dsi = regulator_get(&dsi->pdev->dev, "VCXIO");

		if (IS_ERR(vdds_dsi)) {
			DSSERR("can't get VDDS_DSI regulator\n");
			return PTR_ERR(vdds_dsi);
		}

		dsi->vdds_dsi_reg = vdds_dsi;
	}

	dsi_enable_pll_clock(dsidev, 1);
	/*
	 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
	 */
	dsi_enable_scp_clk(dsidev);

	if (!dsi->vdds_dsi_enabled) {
		r = regulator_enable(dsi->vdds_dsi_reg);
		if (r)
			goto err0;
		dsi->vdds_dsi_enabled = true;
	}

	/* XXX PLL does not come out of reset without this... */
	dispc_pck_free_enable(1);

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
		DSSERR("PLL not coming out of reset.\n");
		r = -ENODEV;
		dispc_pck_free_enable(0);
		goto err1;
	}

	/* XXX ... but if left on, we get problems when planes do not
	 * fill the whole display. No idea about this */
	dispc_pck_free_enable(0);

	if (enable_hsclk && enable_hsdiv)
		pwstate = DSI_PLL_POWER_ON_ALL;
	else if (enable_hsclk)
		pwstate = DSI_PLL_POWER_ON_HSCLK;
	else if (enable_hsdiv)
		pwstate = DSI_PLL_POWER_ON_DIV;
	else
		pwstate = DSI_PLL_POWER_OFF;

	r = dsi_pll_power(dsidev, pwstate);
	if (r)
		goto err1;

	DSSDBG("PLL init done\n");

	return 0;
err1:
	if (dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
err0:
	dsi_disable_scp_clk(dsidev);
	dsi_enable_pll_clock(dsidev, 0);
	return r;
}
void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi->pll_locked = 0;
	dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
	if (disconnect_lanes) {
		WARN_ON(!dsi->vdds_dsi_enabled);
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}

	dsi_disable_scp_clk(dsidev);
	dsi_enable_pll_clock(dsidev, 0);

	DSSDBG("PLL uninit done\n");
}
static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info *cinfo = &dsi->current_cinfo;
	enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
	int dsi_module = dsi->module_id;

	dispc_clk_src = dss_get_dispc_clk_source();
	dsi_clk_src = dss_get_dsi_clk_source(dsi_module);

	if (dsi_runtime_get(dsidev))
		return;

	seq_printf(s,	"- DSI%d PLL -\n", dsi_module + 1);

	seq_printf(s,	"dsi pll clkin\t%lu\n", cinfo->clkin);

	seq_printf(s,	"Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);

	seq_printf(s,	"CLKIN4DDR\t%-16luregm %u\n",
			cinfo->clkin4ddr, cinfo->regm);

	seq_printf(s,	"DSI_PLL_HSDIV_DISPC (%s)\t%-16luregm_dispc %u\t(%s)\n",
			dss_feat_get_clk_source_name(dsi_module == 0 ?
				OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
				OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
			cinfo->dsi_pll_hsdiv_dispc_clk,
			cinfo->regm_dispc,
			dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s,	"DSI_PLL_HSDIV_DSI (%s)\t%-16luregm_dsi %u\t(%s)\n",
			dss_feat_get_clk_source_name(dsi_module == 0 ?
				OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
				OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
			cinfo->dsi_pll_hsdiv_dsi_clk,
			cinfo->regm_dsi,
			dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s,	"- DSI%d -\n", dsi_module + 1);

	seq_printf(s,	"dsi fclk source = %s (%s)\n",
			dss_get_generic_clk_source_name(dsi_clk_src),
			dss_feat_get_clk_source_name(dsi_clk_src));

	seq_printf(s,	"DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));

	seq_printf(s,	"DDR_CLK\t\t%lu\n",
			cinfo->clkin4ddr / 4);

	seq_printf(s,	"TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));

	seq_printf(s,	"LP_CLK\t\t%lu\n", cinfo->lp_clk);

	dsi_runtime_put(dsidev);
}
void dsi_dump_clocks(struct seq_file *s)
{
	struct platform_device *dsidev;
	int i;

	for (i = 0; i < MAX_NUM_DSI; i++) {
		dsidev = dsi_get_dsidev_from_id(i);
		if (dsidev)
			dsi_dump_dsidev_clocks(dsidev, s);
	}
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	struct dsi_irq_stats stats;

	spin_lock_irqsave(&dsi->irq_stats_lock, flags);

	stats = dsi->irq_stats;
	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
	dsi->irq_stats.last_reset = jiffies;

	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);

	seq_printf(s, "period %u ms\n",
			jiffies_to_msecs(jiffies - stats.last_reset));

	seq_printf(s, "irqs %d\n", stats.irq_count);
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);

	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

#define PIS(x) \
	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);

	seq_printf(s, "-- VC interrupts --\n");
	PIS(CS);
	PIS(ECC_CORR);
	PIS(PACKET_SENT);
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS

#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, \
			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);

	seq_printf(s, "-- CIO interrupts --\n");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS
}
static void dsi1_dump_irqs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(0);

	dsi_dump_dsidev_irqs(dsidev, s);
}

static void dsi2_dump_irqs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(1);

	dsi_dump_dsidev_irqs(dsidev, s);
}
#endif
static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
		struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))

	if (dsi_runtime_get(dsidev))
		return;
	dsi_enable_scp_clk(dsidev);

	DUMPREG(DSI_REVISION);
	DUMPREG(DSI_SYSCONFIG);
	DUMPREG(DSI_SYSSTATUS);
	DUMPREG(DSI_IRQSTATUS);
	DUMPREG(DSI_IRQENABLE);
	DUMPREG(DSI_CTRL);
	DUMPREG(DSI_COMPLEXIO_CFG1);
	DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
	DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
	DUMPREG(DSI_CLK_CTRL);
	DUMPREG(DSI_TIMING1);
	DUMPREG(DSI_TIMING2);
	DUMPREG(DSI_VM_TIMING1);
	DUMPREG(DSI_VM_TIMING2);
	DUMPREG(DSI_VM_TIMING3);
	DUMPREG(DSI_CLK_TIMING);
	DUMPREG(DSI_TX_FIFO_VC_SIZE);
	DUMPREG(DSI_RX_FIFO_VC_SIZE);
	DUMPREG(DSI_COMPLEXIO_CFG2);
	DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
	DUMPREG(DSI_VM_TIMING4);
	DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
	DUMPREG(DSI_VM_TIMING5);
	DUMPREG(DSI_VM_TIMING6);
	DUMPREG(DSI_VM_TIMING7);
	DUMPREG(DSI_STOPCLK_TIMING);

	DUMPREG(DSI_VC_CTRL(0));
	DUMPREG(DSI_VC_TE(0));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
	DUMPREG(DSI_VC_IRQSTATUS(0));
	DUMPREG(DSI_VC_IRQENABLE(0));

	DUMPREG(DSI_VC_CTRL(1));
	DUMPREG(DSI_VC_TE(1));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
	DUMPREG(DSI_VC_IRQSTATUS(1));
	DUMPREG(DSI_VC_IRQENABLE(1));

	DUMPREG(DSI_VC_CTRL(2));
	DUMPREG(DSI_VC_TE(2));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
	DUMPREG(DSI_VC_IRQSTATUS(2));
	DUMPREG(DSI_VC_IRQENABLE(2));

	DUMPREG(DSI_VC_CTRL(3));
	DUMPREG(DSI_VC_TE(3));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
	DUMPREG(DSI_VC_IRQSTATUS(3));
	DUMPREG(DSI_VC_IRQENABLE(3));

	DUMPREG(DSI_DSIPHY_CFG0);
	DUMPREG(DSI_DSIPHY_CFG1);
	DUMPREG(DSI_DSIPHY_CFG2);
	DUMPREG(DSI_DSIPHY_CFG5);

	DUMPREG(DSI_PLL_CONTROL);
	DUMPREG(DSI_PLL_STATUS);
	DUMPREG(DSI_PLL_GO);
	DUMPREG(DSI_PLL_CONFIGURATION1);
	DUMPREG(DSI_PLL_CONFIGURATION2);

	dsi_disable_scp_clk(dsidev);
	dsi_runtime_put(dsidev);
#undef DUMPREG
}
static void dsi1_dump_regs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(0);

	dsi_dump_dsidev_regs(dsidev, s);
}

static void dsi2_dump_regs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(1);

	dsi_dump_dsidev_regs(dsidev, s);
}
enum dsi_cio_power_state {
	DSI_COMPLEXIO_POWER_OFF		= 0x0,
	DSI_COMPLEXIO_POWER_ON		= 0x1,
	DSI_COMPLEXIO_POWER_ULPS	= 0x2,
};

static int dsi_cio_power(struct platform_device *dsidev,
		enum dsi_cio_power_state state)
{
	int t = 0;

	/* PWR_CMD */
	REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);

	/* PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
			26, 25) != state) {
		if (++t > 1000) {
			DSSERR("failed to set complexio power state to "
					"%d\n", state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}
static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
{
	int val;

	/* line buffer on OMAP3 is 1024 x 24bits */
	/* XXX: for some reason using full buffer size causes
	 * considerable TX slowdown with update sizes that fill the
	 * whole buffer */
	if (!dss_has_feature(FEAT_DSI_GNQ))
		return 1023 * 3;

	val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */

	switch (val) {
	case 1:
		return 512 * 3;		/* 512x24 bits */
	case 2:
		return 682 * 3;		/* 682x24 bits */
	case 3:
		return 853 * 3;		/* 853x24 bits */
	case 4:
		return 1024 * 3;	/* 1024x24 bits */
	case 5:
		return 1194 * 3;	/* 1194x24 bits */
	case 6:
		return 1365 * 3;	/* 1365x24 bits */
	case 7:
		return 1920 * 3;	/* 1920x24 bits */
	default:
		BUG();
		return 0;
	}
}
static int dsi_set_lane_config(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	static const u8 offsets[] = { 0, 4, 8, 12, 16 };
	static const enum dsi_lane_function functions[] = {
		DSI_LANE_CLK,
		DSI_LANE_DATA1,
		DSI_LANE_DATA2,
		DSI_LANE_DATA3,
		DSI_LANE_DATA4,
	};
	u32 r;
	int i;

	r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);

	for (i = 0; i < dsi->num_lanes_used; ++i) {
		unsigned offset = offsets[i];
		unsigned polarity, lane_number;
		unsigned t;

		for (t = 0; t < dsi->num_lanes_supported; ++t)
			if (dsi->lanes[t].function == functions[i])
				break;

		if (t == dsi->num_lanes_supported)
			return -EINVAL;

		lane_number = t;
		polarity = dsi->lanes[t].polarity;

		r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
		r = FLD_MOD(r, polarity, offset + 3, offset + 3);
	}

	/* clear the unused lanes */
	for (; i < dsi->num_lanes_supported; ++i) {
		unsigned offset = offsets[i];

		r = FLD_MOD(r, 0, offset + 2, offset);
		r = FLD_MOD(r, 0, offset + 3, offset + 3);
	}

	dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);

	return 0;
}
static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* convert time in ns to ddr ticks, rounding up */
	unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
	return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}

static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
	return ddr * 1000 * 1000 / (ddr_clk / 1000);
}
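
/*
 * Worked example (illustrative): clkin4ddr = 864 MHz gives a 216 MHz DDR
 * clock, so one tick is ~4.63 ns and ns2ddr(dsidev, 70) rounds 70 ns up
 * to (70 * 216 + 999) / 1000 = 16 ticks, which ddr2ns() maps back to
 * ~74 ns.
 */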
static void dsi_cio_timings(struct platform_device *dsidev)
{
	u32 r;
	u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
	u32 tlpx_half, tclk_trail, tclk_zero;
	u32 tclk_prepare;

	/* calculate timings */

	/* 1 * DDR_CLK = 2 * UI */

	/* min 40ns + 4*UI	max 85ns + 6*UI */
	ths_prepare = ns2ddr(dsidev, 70) + 2;

	/* min 145ns + 10*UI */
	ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;

	/* min max(8*UI, 60ns+4*UI) */
	ths_trail = ns2ddr(dsidev, 60) + 5;

	/* min 100ns */
	ths_exit = ns2ddr(dsidev, 145);

	/* tLPX min 50n */
	tlpx_half = ns2ddr(dsidev, 25);

	/* min 60ns */
	tclk_trail = ns2ddr(dsidev, 60) + 2;

	/* min 38ns, max 95ns */
	tclk_prepare = ns2ddr(dsidev, 65);

	/* min tclk-prepare + tclk-zero = 300ns */
	tclk_zero = ns2ddr(dsidev, 260);

	DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
		ths_prepare, ddr2ns(dsidev, ths_prepare),
		ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
	DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
			ths_trail, ddr2ns(dsidev, ths_trail),
			ths_exit, ddr2ns(dsidev, ths_exit));

	DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
			"tclk_zero %u (%uns)\n",
			tlpx_half, ddr2ns(dsidev, tlpx_half),
			tclk_trail, ddr2ns(dsidev, tclk_trail),
			tclk_zero, ddr2ns(dsidev, tclk_zero));
	DSSDBG("tclk_prepare %u (%uns)\n",
			tclk_prepare, ddr2ns(dsidev, tclk_prepare));

	/* program timings */

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	r = FLD_MOD(r, ths_prepare, 31, 24);
	r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
	r = FLD_MOD(r, ths_trail, 15, 8);
	r = FLD_MOD(r, ths_exit, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	r = FLD_MOD(r, tlpx_half, 20, 16);
	r = FLD_MOD(r, tclk_trail, 15, 8);
	r = FLD_MOD(r, tclk_zero, 7, 0);

	if (dss_has_feature(FEAT_DSI_PHY_DCC)) {
		r = FLD_MOD(r, 0, 21, 21);	/* DCCEN = disable */
		r = FLD_MOD(r, 1, 22, 22);	/* CLKINP_DIVBY2EN = enable */
		r = FLD_MOD(r, 1, 23, 23);	/* CLKINP_SEL = enable */
	}

	dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
	r = FLD_MOD(r, tclk_prepare, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
}
/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
		unsigned mask_p, unsigned mask_n)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;
	u32 l;
	u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;

	l = 0;

	for (i = 0; i < dsi->num_lanes_supported; ++i) {
		unsigned p = dsi->lanes[i].polarity;

		if (mask_p & (1 << i))
			l |= 1 << (i * 2 + (p ? 0 : 1));

		if (mask_n & (1 << i))
			l |= 1 << (i * 2 + (p ? 1 : 0));
	}

	/*
	 * Bits in REGLPTXSCPDAT4TO0DXDY:
	 * 17: DY0 18: DX0
	 * 19: DY1 20: DX1
	 * 21: DY2 22: DX2
	 * 23: DY3 24: DX3
	 * 25: DY4 26: DX4
	 */

	/* Set the lane override configuration */

	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);

	/* Enable lane override */

	/* ENLPTXSCPDAT */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
}
static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
{
	/* Disable lane override */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
	/* Reset the lane override configuration */
	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
}
static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    int t = 100000, i;
    bool in_use[DSI_MAX_NR_LANES];
    static const u8 offsets_old[] = { 28, 27, 26 };
    static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
    const u8 *offsets;

    if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC))
        offsets = offsets_old;
    else
        offsets = offsets_new;

    for (i = 0; i < dsi->num_lanes_supported; ++i)
        in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;

    while (1) {
        u32 l;
        int ok = 0;

        l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

        for (i = 0; i < dsi->num_lanes_supported; ++i) {
            if (!in_use[i] || (l & (1 << offsets[i])))
                ok++;
        }
        if (ok == dsi->num_lanes_supported)
            break;

        if (--t == 0) {
            for (i = 0; i < dsi->num_lanes_supported; ++i) {
                if (!in_use[i] || (l & (1 << offsets[i])))
                    continue;
                DSSERR("CIO TXCLKESC%d domain not coming "
                        "out of reset\n", i);
            }
            return -EIO;
        }
    }

    return 0;
}
/* return bitmask of enabled lanes, lane0 being the lsb */
static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    unsigned mask = 0;
    int i;

    for (i = 0; i < dsi->num_lanes_supported; ++i) {
        if (dsi->lanes[i].function != DSI_LANE_UNUSED)
            mask |= 1 << i;
    }

    return mask;
}
static int dsi_cio_init(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    int r;
    u32 l;

    DSSDBG("DSI CIO init starts");

    r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
    if (r)
        return r;

    dsi_enable_scp_clk(dsidev);

    /* A dummy read using the SCP interface to any DSIPHY register is
     * required after DSIPHY reset to complete the reset of the DSI complex
     * I/O. */
    dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

    if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
        DSSERR("CIO SCP Clock domain not coming out of reset.\n");
        r = -EIO;
        goto err_scp_clk_dom;
    }

    r = dsi_set_lane_config(dsidev);
    if (r)
        goto err_scp_clk_dom;

    /* set TX STOP MODE timer to maximum for this operation */
    l = dsi_read_reg(dsidev, DSI_TIMING1);
    l = FLD_MOD(l, 1, 15, 15);          /* FORCE_TX_STOP_MODE_IO */
    l = FLD_MOD(l, 1, 14, 14);          /* STOP_STATE_X16_IO */
    l = FLD_MOD(l, 1, 13, 13);          /* STOP_STATE_X4_IO */
    l = FLD_MOD(l, 0x1fff, 12, 0);      /* STOP_STATE_COUNTER_IO */
    dsi_write_reg(dsidev, DSI_TIMING1, l);

    if (dsi->ulps_enabled) {
        unsigned mask_p;
        int i;

        DSSDBG("manual ulps exit\n");

        /* ULPS is exited by Mark-1 state for 1ms, followed by
         * stop state. DSS HW cannot do this via the normal
         * ULPS exit sequence, as after reset the DSS HW thinks
         * that we are not in ULPS mode, and refuses to send the
         * sequence. So we need to send the ULPS exit sequence
         * manually by setting positive lines high and negative lines
         * low, for 1ms.
         */

        mask_p = 0;

        for (i = 0; i < dsi->num_lanes_supported; ++i) {
            if (dsi->lanes[i].function == DSI_LANE_UNUSED)
                continue;
            mask_p |= 1 << i;
        }

        dsi_cio_enable_lane_override(dsidev, mask_p, 0);
    }

    r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
    if (r)
        goto err_cio_pwr;

    if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
        DSSERR("CIO PWR clock domain not coming out of reset.\n");
        r = -EIO;
        goto err_cio_pwr_dom;
    }

    dsi_if_enable(dsidev, true);
    dsi_if_enable(dsidev, false);
    REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */

    r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
    if (r)
        goto err_tx_clk_esc_rst;

    if (dsi->ulps_enabled) {
        /* Keep Mark-1 state for 1ms (as per DSI spec) */
        ktime_t wait = ns_to_ktime(1000 * 1000);
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_hrtimeout(&wait, HRTIMER_MODE_REL);

        /* Disable the override. The lanes should be set to Mark-1
         * state by the HW */
        dsi_cio_disable_lane_override(dsidev);
    }

    /* FORCE_TX_STOP_MODE_IO */
    REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);

    dsi_cio_timings(dsidev);

    if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
        /* DDR_CLK_ALWAYS_ON */
        REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
                dsi->vm_timings.ddr_clk_always_on, 13, 13);
    }

    dsi->ulps_enabled = false;

    DSSDBG("CIO init done\n");

    return 0;

err_tx_clk_esc_rst:
    REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
err_cio_pwr_dom:
    dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
    if (dsi->ulps_enabled)
        dsi_cio_disable_lane_override(dsidev);
err_scp_clk_dom:
    dsi_disable_scp_clk(dsidev);
    dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));

    return r;
}
static void dsi_cio_uninit(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

    /* DDR_CLK_ALWAYS_ON */
    REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);

    dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
    dsi_disable_scp_clk(dsidev);
    dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dsidev));
}
static void dsi_config_tx_fifo(struct platform_device *dsidev,
        enum fifo_size size1, enum fifo_size size2,
        enum fifo_size size3, enum fifo_size size4)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u32 r = 0;
    int add = 0;
    int i;

    dsi->vc[0].fifo_size = size1;
    dsi->vc[1].fifo_size = size2;
    dsi->vc[2].fifo_size = size3;
    dsi->vc[3].fifo_size = size4;

    for (i = 0; i < 4; i++) {
        u8 v;
        int size = dsi->vc[i].fifo_size;

        if (add + size > 4) {
            DSSERR("Illegal FIFO configuration\n");
            BUG();
            return;
        }

        v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
        r |= v << (8 * i);
        /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
        add += size;
    }

    dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
}
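/*
 * Example (illustrative reading of the DSI_FIFO_SIZE_* values as
 * allocation units): the four VCs share a FIFO of 4 units, so passing
 * DSI_FIFO_SIZE_32 (one unit) for each VC places them back to back at
 * add = 0, 1, 2, 3. Any combination whose sizes sum past 4 units trips
 * the "Illegal FIFO configuration" error above. The bound used in
 * dsi_vc_send_long() (fifo_size * 32 * 4 bytes) suggests one unit is
 * 32 words of 4 bytes, i.e. 128 bytes.
 */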
static void dsi_config_rx_fifo(struct platform_device *dsidev,
        enum fifo_size size1, enum fifo_size size2,
        enum fifo_size size3, enum fifo_size size4)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u32 r = 0;
    int add = 0;
    int i;

    dsi->vc[0].fifo_size = size1;
    dsi->vc[1].fifo_size = size2;
    dsi->vc[2].fifo_size = size3;
    dsi->vc[3].fifo_size = size4;

    for (i = 0; i < 4; i++) {
        u8 v;
        int size = dsi->vc[i].fifo_size;

        if (add + size > 4) {
            DSSERR("Illegal FIFO configuration\n");
            BUG();
            return;
        }

        v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
        r |= v << (8 * i);
        /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
        add += size;
    }

    dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
}
static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
{
    u32 r;

    r = dsi_read_reg(dsidev, DSI_TIMING1);
    r = FLD_MOD(r, 1, 15, 15);  /* FORCE_TX_STOP_MODE_IO */
    dsi_write_reg(dsidev, DSI_TIMING1, r);

    if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
        DSSERR("TX_STOP bit not going down\n");
        return -EIO;
    }

    return 0;
}
static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
{
    return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
}
static void dsi_packet_sent_handler_vp(void *data, u32 mask)
{
    struct dsi_packet_sent_handler_data *vp_data =
            (struct dsi_packet_sent_handler_data *) data;
    struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
    const int channel = dsi->update_channel;
    u8 bit = dsi->te_enabled ? 30 : 31;

    if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
        complete(vp_data->completion);
}
static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    DECLARE_COMPLETION_ONSTACK(completion);
    struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
    int r = 0;
    u8 bit;

    bit = dsi->te_enabled ? 30 : 31;

    r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
            &vp_data, DSI_VC_IRQ_PACKET_SENT);
    if (r)
        goto err0;

    /* Wait for completion only if TE_EN/TE_START is still set */
    if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
        if (wait_for_completion_timeout(&completion,
                msecs_to_jiffies(10)) == 0) {
            DSSERR("Failed to complete previous frame transfer\n");
            r = -EIO;
            goto err1;
        }
    }

    dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
            &vp_data, DSI_VC_IRQ_PACKET_SENT);

    return 0;
err1:
    dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
            &vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
    return r;
}
static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
    struct dsi_packet_sent_handler_data *l4_data =
            (struct dsi_packet_sent_handler_data *) data;
    struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
    const int channel = dsi->update_channel;

    if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
        complete(l4_data->completion);
}
static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
{
    DECLARE_COMPLETION_ONSTACK(completion);
    struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
    int r = 0;

    r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
            &l4_data, DSI_VC_IRQ_PACKET_SENT);
    if (r)
        goto err0;

    /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
    if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
        if (wait_for_completion_timeout(&completion,
                msecs_to_jiffies(10)) == 0) {
            DSSERR("Failed to complete previous l4 transfer\n");
            r = -EIO;
            goto err1;
        }
    }

    dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
            &l4_data, DSI_VC_IRQ_PACKET_SENT);

    return 0;
err1:
    dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
            &l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
    return r;
}
static int dsi_sync_vc(struct platform_device *dsidev, int channel)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

    WARN_ON(!dsi_bus_is_locked(dsidev));

    WARN_ON(in_interrupt());

    if (!dsi_vc_is_enabled(dsidev, channel))
        return 0;

    switch (dsi->vc[channel].source) {
    case DSI_VC_SOURCE_VP:
        return dsi_sync_vc_vp(dsidev, channel);
    case DSI_VC_SOURCE_L4:
        return dsi_sync_vc_l4(dsidev, channel);
    default:
        BUG();
        return -EINVAL;
    }
}
static int dsi_vc_enable(struct platform_device *dsidev, int channel,
        bool enable)
{
    DSSDBG("dsi_vc_enable channel %d, enable %d\n",
            channel, enable);

    enable = enable ? 1 : 0;

    REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);

    if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
            0, enable) != enable) {
        DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
        return -EIO;
    }

    return 0;
}
static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u32 r;

    DSSDBG("Initial config of virtual channel %d", channel);

    r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));

    if (FLD_GET(r, 15, 15)) /* VC_BUSY */
        DSSERR("VC(%d) busy when trying to configure it!\n",
                channel);

    r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
    r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
    r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
    r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
    r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
    r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
    r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
    if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
        r = FLD_MOD(r, 3, 11, 10);      /* OCP_WIDTH = 32 bit */

    r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
    r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */

    dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);

    dsi->vc[channel].source = DSI_VC_SOURCE_L4;
}
static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
        enum dsi_vc_source source)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

    if (dsi->vc[channel].source == source)
        return 0;

    DSSDBG("Source config of virtual channel %d", channel);

    dsi_sync_vc(dsidev, channel);

    dsi_vc_enable(dsidev, channel, 0);

    /* VC_BUSY */
    if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
        DSSERR("vc(%d) busy when trying to config for VP\n", channel);
        return -EIO;
    }

    /* SOURCE, 0 = L4, 1 = video port */
    REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);

    /* DCS_CMD_ENABLE */
    if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
        bool enable = source == DSI_VC_SOURCE_VP;
        REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
    }

    dsi_vc_enable(dsidev, channel, 1);

    dsi->vc[channel].source = source;

    return 0;
}
void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
        bool enable)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

    DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);

    WARN_ON(!dsi_bus_is_locked(dsidev));

    dsi_vc_enable(dsidev, channel, 0);
    dsi_if_enable(dsidev, 0);

    REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);

    dsi_vc_enable(dsidev, channel, 1);
    dsi_if_enable(dsidev, 1);

    dsi_force_tx_stop_mode_io(dsidev);

    /* start the DDR clock by sending a NULL packet */
    if (dsi->vm_timings.ddr_clk_always_on && enable)
        dsi_vc_send_null(dssdev, channel);
}
EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
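/*
 * Hypothetical panel-driver usage sketch (holding the DSI bus lock, as
 * the WARN_ON above requires): configuration is typically sent in LP
 * mode, after which the VC is switched to high speed for pixel data:
 *
 *      omapdss_dsi_vc_enable_hs(dssdev, ch, false);
 *      ... LP-mode DCS/generic configuration writes ...
 *      omapdss_dsi_vc_enable_hs(dssdev, ch, true);
 */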
static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
{
    /* RX_FIFO_NOT_EMPTY */
    while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
        u32 val;
        val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
        DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
                (val >> 0) & 0xff,
                (val >> 8) & 0xff,
                (val >> 16) & 0xff,
                (val >> 24) & 0xff);
    }
}
static void dsi_show_rx_ack_with_err(u16 err)
{
    DSSERR("\tACK with ERROR (%#x):\n", err);
    if (err & (1 << 0))
        DSSERR("\t\tSoT Error\n");
    if (err & (1 << 1))
        DSSERR("\t\tSoT Sync Error\n");
    if (err & (1 << 2))
        DSSERR("\t\tEoT Sync Error\n");
    if (err & (1 << 3))
        DSSERR("\t\tEscape Mode Entry Command Error\n");
    if (err & (1 << 4))
        DSSERR("\t\tLP Transmit Sync Error\n");
    if (err & (1 << 5))
        DSSERR("\t\tHS Receive Timeout Error\n");
    if (err & (1 << 6))
        DSSERR("\t\tFalse Control Error\n");
    if (err & (1 << 7))
        DSSERR("\t\t(reserved7)\n");
    if (err & (1 << 8))
        DSSERR("\t\tECC Error, single-bit (corrected)\n");
    if (err & (1 << 9))
        DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
    if (err & (1 << 10))
        DSSERR("\t\tChecksum Error\n");
    if (err & (1 << 11))
        DSSERR("\t\tData type not recognized\n");
    if (err & (1 << 12))
        DSSERR("\t\tInvalid VC ID\n");
    if (err & (1 << 13))
        DSSERR("\t\tInvalid Transmission Length\n");
    if (err & (1 << 14))
        DSSERR("\t\t(reserved14)\n");
    if (err & (1 << 15))
        DSSERR("\t\tDSI Protocol Violation\n");
}
static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
        int channel)
{
    /* RX_FIFO_NOT_EMPTY */
    while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
        u32 val;
        u8 dt;
        val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
        DSSERR("\trawval %#08x\n", val);
        dt = FLD_GET(val, 5, 0);
        if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
            u16 err = FLD_GET(val, 23, 8);
            dsi_show_rx_ack_with_err(err);
        } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
            DSSERR("\tDCS short response, 1 byte: %#x\n",
                    FLD_GET(val, 23, 8));
        } else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
            DSSERR("\tDCS short response, 2 byte: %#x\n",
                    FLD_GET(val, 23, 8));
        } else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
            DSSERR("\tDCS long response, len %d\n",
                    FLD_GET(val, 23, 8));
            dsi_vc_flush_long_data(dsidev, channel);
        } else {
            DSSERR("\tunknown datatype 0x%02x\n", dt);
        }
    }
    return 0;
}
static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

    if (dsi->debug_write || dsi->debug_read)
        DSSDBG("dsi_vc_send_bta %d\n", channel);

    WARN_ON(!dsi_bus_is_locked(dsidev));

    /* RX_FIFO_NOT_EMPTY */
    if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
        DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
        dsi_vc_flush_receive_data(dsidev, channel);
    }

    REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */

    /* flush posted write */
    dsi_read_reg(dsidev, DSI_VC_CTRL(channel));

    return 0;
}
int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    DECLARE_COMPLETION_ONSTACK(completion);
    int r = 0;
    u32 err;

    r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
            &completion, DSI_VC_IRQ_BTA);
    if (r)
        goto err0;

    r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
            DSI_IRQ_ERROR_MASK);
    if (r)
        goto err1;

    r = dsi_vc_send_bta(dsidev, channel);
    if (r)
        goto err2;

    if (wait_for_completion_timeout(&completion,
            msecs_to_jiffies(500)) == 0) {
        DSSERR("Failed to receive BTA\n");
        r = -EIO;
        goto err2;
    }

    err = dsi_get_errors(dsidev);
    if (err) {
        DSSERR("Error while sending BTA: %x\n", err);
        r = -EIO;
        goto err2;
    }
err2:
    dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
            DSI_IRQ_ERROR_MASK);
err1:
    dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
            &completion, DSI_VC_IRQ_BTA);
err0:
    return r;
}
EXPORT_SYMBOL(dsi_vc_send_bta_sync);
static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
        int channel, u8 data_type, u16 len, u8 ecc)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u32 val;
    u8 data_id;

    WARN_ON(!dsi_bus_is_locked(dsidev));

    data_id = data_type | dsi->vc[channel].vc_id << 6;

    val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
            FLD_VAL(ecc, 31, 24);

    dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
}
static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
        int channel, u8 b1, u8 b2, u8 b3, u8 b4)
{
    u32 val;

    val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;

    /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
            b1, b2, b3, b4, val); */

    dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
}
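/*
 * Byte lanes: b1 is the first payload byte on the DSI bus and lands in
 * the least significant byte of the register word. For example, bytes
 * 0x01 0x02 0x03 0x04 are packed as val = 0x04030201.
 */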
static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
        u8 data_type, u8 *data, u16 len, u8 ecc)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    int i;
    u8 *p;
    int r = 0;
    u8 b1, b2, b3, b4;

    if (dsi->debug_write)
        DSSDBG("dsi_vc_send_long, %d bytes\n", len);

    /* len + header */
    if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
        DSSERR("unable to send long packet: packet too long.\n");
        return -EINVAL;
    }

    dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);

    dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);

    p = data;
    for (i = 0; i < len >> 2; i++) {
        if (dsi->debug_write)
            DSSDBG("\tsending full packet %d\n", i);

        b1 = *p++;
        b2 = *p++;
        b3 = *p++;
        b4 = *p++;

        dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
    }

    i = len % 4;
    if (i) {
        b1 = 0; b2 = 0; b3 = 0;

        if (dsi->debug_write)
            DSSDBG("\tsending remainder bytes %d\n", i);

        if (i >= 1)
            b1 = *p++;
        if (i >= 2)
            b2 = *p++;
        if (i == 3)
            b3 = *p++;

        dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
    }

    return r;
}
static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
        u8 data_type, u16 data, u8 ecc)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u32 r;
    u8 data_id;

    WARN_ON(!dsi_bus_is_locked(dsidev));

    if (dsi->debug_write)
        DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
                channel,
                data_type, data & 0xff, (data >> 8) & 0xff);

    dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);

    if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
        DSSERR("ERROR FIFO FULL, aborting transfer\n");
        return -EINVAL;
    }

    data_id = data_type | dsi->vc[channel].vc_id << 6;

    r = (data_id << 0) | (data << 8) | (ecc << 24);

    dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);

    return 0;
}
int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

    return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
            0, 0);
}
EXPORT_SYMBOL(dsi_vc_send_null);
static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
        int channel, u8 *data, int len, enum dss_dsi_content_type type)
{
    int r;

    if (len == 0) {
        BUG_ON(type == DSS_DSI_CONTENT_DCS);
        r = dsi_vc_send_short(dsidev, channel,
                MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
    } else if (len == 1) {
        r = dsi_vc_send_short(dsidev, channel,
                type == DSS_DSI_CONTENT_GENERIC ?
                MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
                MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
    } else if (len == 2) {
        r = dsi_vc_send_short(dsidev, channel,
                type == DSS_DSI_CONTENT_GENERIC ?
                MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
                MIPI_DSI_DCS_SHORT_WRITE_PARAM,
                data[0] | (data[1] << 8), 0);
    } else {
        r = dsi_vc_send_long(dsidev, channel,
                type == DSS_DSI_CONTENT_GENERIC ?
                MIPI_DSI_GENERIC_LONG_WRITE :
                MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
    }

    return r;
}
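/*
 * Summary of the length-to-packet mapping implemented above:
 *   len == 0: generic short write, no parameters (DCS forbidden)
 *   len == 1: short write with 1 parameter
 *   len == 2: short write with 2 parameters
 *   len  > 2: long write
 */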
int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
        u8 *data, int len)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

    return dsi_vc_write_nosync_common(dsidev, channel, data, len,
            DSS_DSI_CONTENT_DCS);
}
EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);

int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
        u8 *data, int len)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

    return dsi_vc_write_nosync_common(dsidev, channel, data, len,
            DSS_DSI_CONTENT_GENERIC);
}
EXPORT_SYMBOL(dsi_vc_generic_write_nosync);
static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
        u8 *data, int len, enum dss_dsi_content_type type)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    int r;

    r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
    if (r)
        goto err;

    r = dsi_vc_send_bta_sync(dssdev, channel);
    if (r)
        goto err;

    /* RX_FIFO_NOT_EMPTY */
    if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
        DSSERR("rx fifo not empty after write, dumping data:\n");
        dsi_vc_flush_receive_data(dsidev, channel);
        r = -EIO;
        goto err;
    }

    return 0;
err:
    DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
            channel, data[0], len);
    return r;
}

int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
        int len)
{
    return dsi_vc_write_common(dssdev, channel, data, len,
            DSS_DSI_CONTENT_DCS);
}
EXPORT_SYMBOL(dsi_vc_dcs_write);
int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
        int len)
{
    return dsi_vc_write_common(dssdev, channel, data, len,
            DSS_DSI_CONTENT_GENERIC);
}
EXPORT_SYMBOL(dsi_vc_generic_write);

int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
{
    return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
}
EXPORT_SYMBOL(dsi_vc_dcs_write_0);

int dsi_vc_generic_write_0(struct omap_dss_device *dssdev, int channel)
{
    return dsi_vc_generic_write(dssdev, channel, NULL, 0);
}
EXPORT_SYMBOL(dsi_vc_generic_write_0);

int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
        u8 param)
{
    u8 buf[2];
    buf[0] = dcs_cmd;
    buf[1] = param;
    return dsi_vc_dcs_write(dssdev, channel, buf, 2);
}
EXPORT_SYMBOL(dsi_vc_dcs_write_1);

int dsi_vc_generic_write_1(struct omap_dss_device *dssdev, int channel,
        u8 param)
{
    return dsi_vc_generic_write(dssdev, channel, &param, 1);
}
EXPORT_SYMBOL(dsi_vc_generic_write_1);

int dsi_vc_generic_write_2(struct omap_dss_device *dssdev, int channel,
        u8 param1, u8 param2)
{
    u8 buf[2];
    buf[0] = param1;
    buf[1] = param2;
    return dsi_vc_generic_write(dssdev, channel, buf, 2);
}
EXPORT_SYMBOL(dsi_vc_generic_write_2);
static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
        int channel, u8 dcs_cmd)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    int r;

    if (dsi->debug_read)
        DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
                channel, dcs_cmd);

    r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
    if (r) {
        DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
                " failed\n", channel, dcs_cmd);
        return r;
    }

    return 0;
}
static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
        int channel, u8 *reqdata, int reqlen)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u16 data;
    u8 data_type;
    int r;

    if (dsi->debug_read)
        DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
                channel, reqlen);

    if (reqlen == 0) {
        data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
        data = 0;
    } else if (reqlen == 1) {
        data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
        data = reqdata[0];
    } else if (reqlen == 2) {
        data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
        data = reqdata[0] | (reqdata[1] << 8);
    } else {
        BUG();
        return -EINVAL;
    }

    r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
    if (r) {
        DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
                " failed\n", channel, reqlen);
        return r;
    }

    return 0;
}
static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
        u8 *buf, int buflen, enum dss_dsi_content_type type)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u32 val;
    u8 dt;
    int r;

    /* RX_FIFO_NOT_EMPTY */
    if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
        DSSERR("RX fifo empty when trying to read.\n");
        r = -EIO;
        goto err;
    }

    val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
    if (dsi->debug_read)
        DSSDBG("\theader: %08x\n", val);
    dt = FLD_GET(val, 5, 0);
    if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
        u16 err = FLD_GET(val, 23, 8);
        dsi_show_rx_ack_with_err(err);
        r = -EIO;
        goto err;

    } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
            MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
            MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE)) {
        u8 data = FLD_GET(val, 15, 8);
        if (dsi->debug_read)
            DSSDBG("\t%s short response, 1 byte: %02x\n",
                    type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
                    "DCS", data);

        if (buflen < 1) {
            r = -EIO;
            goto err;
        }

        buf[0] = data;

        return 1;
    } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
            MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
            MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE)) {
        u16 data = FLD_GET(val, 23, 8);
        if (dsi->debug_read)
            DSSDBG("\t%s short response, 2 byte: %04x\n",
                    type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
                    "DCS", data);

        if (buflen < 2) {
            r = -EIO;
            goto err;
        }

        buf[0] = data & 0xff;
        buf[1] = (data >> 8) & 0xff;

        return 2;
    } else if (dt == (type == DSS_DSI_CONTENT_GENERIC ?
            MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
            MIPI_DSI_RX_DCS_LONG_READ_RESPONSE)) {
        int w;
        int len = FLD_GET(val, 23, 8);
        if (dsi->debug_read)
            DSSDBG("\t%s long response, len %d\n",
                    type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" :
                    "DCS", len);

        if (len > buflen) {
            r = -EIO;
            goto err;
        }

        /* two byte checksum ends the packet, not included in len */
        for (w = 0; w < len + 2;) {
            int b;
            val = dsi_read_reg(dsidev,
                    DSI_VC_SHORT_PACKET_HEADER(channel));
            if (dsi->debug_read)
                DSSDBG("\t\t%02x %02x %02x %02x\n",
                        (val >> 0) & 0xff,
                        (val >> 8) & 0xff,
                        (val >> 16) & 0xff,
                        (val >> 24) & 0xff);

            for (b = 0; b < 4; ++b) {
                if (w < len)
                    buf[w] = (val >> (b * 8)) & 0xff;
                /* we discard the 2 byte checksum */
                ++w;
            }
        }

        return len;
    } else {
        DSSERR("\tunknown datatype 0x%02x\n", dt);
        r = -EIO;
        goto err;
    }

err:
    DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
            type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");

    return r;
}
int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
        u8 *buf, int buflen)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    int r;

    r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
    if (r)
        goto err;

    r = dsi_vc_send_bta_sync(dssdev, channel);
    if (r)
        goto err;

    r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
            DSS_DSI_CONTENT_DCS);
    if (r < 0)
        goto err;

    if (r != buflen) {
        r = -EIO;
        goto err;
    }

    return 0;
err:
    DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
    return r;
}
EXPORT_SYMBOL(dsi_vc_dcs_read);
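/*
 * Illustrative call (hypothetical panel code): read the one-byte DCS
 * power mode register (standard MIPI DCS command 0x0a) over VC 0:
 *
 *      u8 mode;
 *      int r = dsi_vc_dcs_read(dssdev, 0, 0x0a, &mode, 1);
 */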
static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
        u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    int r;

    r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
    if (r)
        return r;

    r = dsi_vc_send_bta_sync(dssdev, channel);
    if (r)
        return r;

    r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
            DSS_DSI_CONTENT_GENERIC);
    if (r < 0)
        return r;

    if (r != buflen)
        return -EIO;

    return 0;
}
int dsi_vc_generic_read_0(struct omap_dss_device *dssdev, int channel, u8 *buf,
        int buflen)
{
    int r;

    r = dsi_vc_generic_read(dssdev, channel, NULL, 0, buf, buflen);
    if (r) {
        DSSERR("dsi_vc_generic_read_0(ch %d) failed\n", channel);
        return r;
    }

    return 0;
}
EXPORT_SYMBOL(dsi_vc_generic_read_0);

int dsi_vc_generic_read_1(struct omap_dss_device *dssdev, int channel, u8 param,
        u8 *buf, int buflen)
{
    int r;

    r = dsi_vc_generic_read(dssdev, channel, &param, 1, buf, buflen);
    if (r) {
        DSSERR("dsi_vc_generic_read_1(ch %d) failed\n", channel);
        return r;
    }

    return 0;
}
EXPORT_SYMBOL(dsi_vc_generic_read_1);

int dsi_vc_generic_read_2(struct omap_dss_device *dssdev, int channel,
        u8 param1, u8 param2, u8 *buf, int buflen)
{
    int r;
    u8 reqdata[2];

    reqdata[0] = param1;
    reqdata[1] = param2;

    r = dsi_vc_generic_read(dssdev, channel, reqdata, 2, buf, buflen);
    if (r) {
        DSSERR("dsi_vc_generic_read_2(ch %d) failed\n", channel);
        return r;
    }

    return 0;
}
EXPORT_SYMBOL(dsi_vc_generic_read_2);
int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
        u16 len)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

    return dsi_vc_send_short(dsidev, channel,
            MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
}
EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
static int dsi_enter_ulps(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    DECLARE_COMPLETION_ONSTACK(completion);
    int r, i;
    unsigned mask;

    DSSDBG("Entering ULPS");

    WARN_ON(!dsi_bus_is_locked(dsidev));

    WARN_ON(dsi->ulps_enabled);

    if (dsi->ulps_enabled)
        return 0;

    /* DDR_CLK_ALWAYS_ON */
    if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
        dsi_if_enable(dsidev, 0);
        REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
        dsi_if_enable(dsidev, 1);
    }

    dsi_sync_vc(dsidev, 0);
    dsi_sync_vc(dsidev, 1);
    dsi_sync_vc(dsidev, 2);
    dsi_sync_vc(dsidev, 3);

    dsi_force_tx_stop_mode_io(dsidev);

    dsi_vc_enable(dsidev, 0, false);
    dsi_vc_enable(dsidev, 1, false);
    dsi_vc_enable(dsidev, 2, false);
    dsi_vc_enable(dsidev, 3, false);

    if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) {  /* HS_BUSY */
        DSSERR("HS busy when enabling ULPS\n");
        return -EIO;
    }

    if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) {  /* LP_BUSY */
        DSSERR("LP busy when enabling ULPS\n");
        return -EIO;
    }

    r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
            DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
    if (r)
        return r;

    mask = 0;

    for (i = 0; i < dsi->num_lanes_supported; ++i) {
        if (dsi->lanes[i].function == DSI_LANE_UNUSED)
            continue;
        mask |= 1 << i;
    }
    /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
    /* LANEx_ULPS_SIG2 */
    REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);

    /* flush posted write and wait for SCP interface to finish the write */
    dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);

    if (wait_for_completion_timeout(&completion,
            msecs_to_jiffies(1000)) == 0) {
        DSSERR("ULPS enable timeout\n");
        r = -EIO;
        goto err;
    }

    dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
            DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);

    /* Reset LANEx_ULPS_SIG2 */
    REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);

    /* flush posted write and wait for SCP interface to finish the write */
    dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);

    dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);

    dsi_if_enable(dsidev, false);

    dsi->ulps_enabled = true;

    return 0;

err:
    dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
            DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
    return r;
}
static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
        unsigned ticks, bool x4, bool x16)
{
    unsigned long fck;
    unsigned long total_ticks;
    u32 r;

    BUG_ON(ticks > 0x1fff);

    /* ticks in DSI_FCK */
    fck = dsi_fclk_rate(dsidev);

    r = dsi_read_reg(dsidev, DSI_TIMING2);
    r = FLD_MOD(r, 1, 15, 15);          /* LP_RX_TO */
    r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);        /* LP_RX_TO_X16 */
    r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);         /* LP_RX_TO_X4 */
    r = FLD_MOD(r, ticks, 12, 0);       /* LP_RX_COUNTER */
    dsi_write_reg(dsidev, DSI_TIMING2, r);

    total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

    DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
            total_ticks,
            ticks, x4 ? " x4" : "", x16 ? " x16" : "",
            (total_ticks * 1000) / (fck / 1000 / 1000));
}
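/*
 * Worked example (illustrative fck): ticks = 0x1fff with both x4 and
 * x16 set gives 8191 * 4 * 16 = 524224 DSI_FCK cycles; at a 173 MHz
 * functional clock that is roughly 3.0 ms before LP_RX_TO fires.
 */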
static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
        bool x8, bool x16)
{
    unsigned long fck;
    unsigned long total_ticks;
    u32 r;

    BUG_ON(ticks > 0x1fff);

    /* ticks in DSI_FCK */
    fck = dsi_fclk_rate(dsidev);

    r = dsi_read_reg(dsidev, DSI_TIMING1);
    r = FLD_MOD(r, 1, 31, 31);          /* TA_TO */
    r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);        /* TA_TO_X16 */
    r = FLD_MOD(r, x8 ? 1 : 0, 29, 29);         /* TA_TO_X8 */
    r = FLD_MOD(r, ticks, 28, 16);      /* TA_TO_COUNTER */
    dsi_write_reg(dsidev, DSI_TIMING1, r);

    total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);

    DSSDBG("TA_TO %lu ticks (%#x%s%s) = %lu ns\n",
            total_ticks,
            ticks, x8 ? " x8" : "", x16 ? " x16" : "",
            (total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_set_stop_state_counter(struct platform_device *dsidev,
        unsigned ticks, bool x4, bool x16)
{
    unsigned long fck;
    unsigned long total_ticks;
    u32 r;

    BUG_ON(ticks > 0x1fff);

    /* ticks in DSI_FCK */
    fck = dsi_fclk_rate(dsidev);

    r = dsi_read_reg(dsidev, DSI_TIMING1);
    r = FLD_MOD(r, 1, 15, 15);          /* FORCE_TX_STOP_MODE_IO */
    r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);        /* STOP_STATE_X16_IO */
    r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);         /* STOP_STATE_X4_IO */
    r = FLD_MOD(r, ticks, 12, 0);       /* STOP_STATE_COUNTER_IO */
    dsi_write_reg(dsidev, DSI_TIMING1, r);

    total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

    DSSDBG("STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns\n",
            total_ticks,
            ticks, x4 ? " x4" : "", x16 ? " x16" : "",
            (total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
        unsigned ticks, bool x4, bool x16)
{
    unsigned long fck;
    unsigned long total_ticks;
    u32 r;

    BUG_ON(ticks > 0x1fff);

    /* ticks in TxByteClkHS */
    fck = dsi_get_txbyteclkhs(dsidev);

    r = dsi_read_reg(dsidev, DSI_TIMING2);
    r = FLD_MOD(r, 1, 31, 31);          /* HS_TX_TO */
    r = FLD_MOD(r, x16 ? 1 : 0, 30, 30);        /* HS_TX_TO_X16 */
    r = FLD_MOD(r, x4 ? 1 : 0, 29, 29);         /* HS_TX_TO_X8 (4 really) */
    r = FLD_MOD(r, ticks, 28, 16);      /* HS_TX_TO_COUNTER */
    dsi_write_reg(dsidev, DSI_TIMING2, r);

    total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

    DSSDBG("HS_TX_TO %lu ticks (%#x%s%s) = %lu ns\n",
            total_ticks,
            ticks, x4 ? " x4" : "", x16 ? " x16" : "",
            (total_ticks * 1000) / (fck / 1000 / 1000));
}
static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    int num_line_buffers;

    if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
        int bpp = dsi_get_pixel_size(dsi->pix_fmt);
        struct omap_video_timings *timings = &dsi->timings;
        /*
         * Don't use line buffers if width is greater than the video
         * port's line buffer size
         */
        if (dsi->line_buffer_size <= timings->x_res * bpp / 8)
            num_line_buffers = 0;
        else
            num_line_buffers = 2;
    } else {
        /* Use maximum number of line buffers in command mode */
        num_line_buffers = 2;
    }

    /* LINE_BUFFER */
    REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
}
static void dsi_config_vp_sync_events(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    bool vsync_end = dsi->vm_timings.vp_vsync_end;
    bool hsync_end = dsi->vm_timings.vp_hsync_end;
    u32 r;

    r = dsi_read_reg(dsidev, DSI_CTRL);
    r = FLD_MOD(r, 1, 9, 9);            /* VP_DE_POL */
    r = FLD_MOD(r, 1, 10, 10);          /* VP_HSYNC_POL */
    r = FLD_MOD(r, 1, 11, 11);          /* VP_VSYNC_POL */
    r = FLD_MOD(r, 1, 15, 15);          /* VP_VSYNC_START */
    r = FLD_MOD(r, vsync_end, 16, 16);  /* VP_VSYNC_END */
    r = FLD_MOD(r, 1, 17, 17);          /* VP_HSYNC_START */
    r = FLD_MOD(r, hsync_end, 18, 18);  /* VP_HSYNC_END */
    dsi_write_reg(dsidev, DSI_CTRL, r);
}
static void dsi_config_blanking_modes(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    int blanking_mode = dsi->vm_timings.blanking_mode;
    int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
    int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
    int hsa_blanking_mode = dsi->vm_timings.hsa_blanking_mode;
    u32 r;

    /*
     * 0 = TX FIFO packets sent or LPS in corresponding blanking periods
     * 1 = Long blanking packets are sent in corresponding blanking periods
     */
    r = dsi_read_reg(dsidev, DSI_CTRL);
    r = FLD_MOD(r, blanking_mode, 20, 20);          /* BLANKING_MODE */
    r = FLD_MOD(r, hfp_blanking_mode, 21, 21);      /* HFP_BLANKING */
    r = FLD_MOD(r, hbp_blanking_mode, 22, 22);      /* HBP_BLANKING */
    r = FLD_MOD(r, hsa_blanking_mode, 23, 23);      /* HSA_BLANKING */
    dsi_write_reg(dsidev, DSI_CTRL, r);
}
/*
 * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
 * results in maximum transition time for data and clock lanes to enter and
 * exit HS mode. Hence, this is the scenario where the least amount of command
 * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
 * clock cycles that can be used to interleave command mode data in HS so that
 * all scenarios are satisfied.
 */
static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
        int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
{
    int transition;

    /*
     * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
     * time of data lanes only, if it isn't set, we need to consider HS
     * transition time of both data and clock lanes. HS transition time
     * of Scenario 3 is considered.
     */
    if (ddr_alwon) {
        transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
    } else {
        int trans1, trans2;
        trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
        trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
                enter_hs + 1;
        transition = max(trans1, trans2);
    }

    return blank > transition ? blank - transition : 0;
}
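/*
 * Worked example (illustrative values): with ddr_alwon set,
 * enter_hs = 14 and exit_hs = 7 give transition = 14 + 7 +
 * max(14, 2) + 1 = 36 TXBYTECLKHS, so a blanking period of 100
 * TXBYTECLKHS leaves 64 cycles for interleaved command mode data.
 */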
/*
 * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
 * results in maximum transition time for data lanes to enter and exit LP mode.
 * Hence, this is the scenario where the least amount of command mode data can
 * be interleaved. We program the minimum amount of bytes that can be
 * interleaved in LP so that all scenarios are satisfied.
 */
static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
        int lp_clk_div, int tdsi_fclk)
{
    int trans_lp;       /* time required for a LP transition, in TXBYTECLKHS */
    int tlp_avail;      /* time left for interleaving commands, in CLKIN4DDR */
    int ttxclkesc;      /* period of LP transmit escape clock, in CLKIN4DDR */
    int thsbyte_clk = 16;       /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
    int lp_inter;       /* cmd mode data that can be interleaved, in bytes */

    /* maximum LP transition time according to Scenario 1 */
    trans_lp = exit_hs + max(enter_hs, 2) + 1;

    /* CLKIN4DDR = 16 * TXBYTECLKHS */
    tlp_avail = thsbyte_clk * (blank - trans_lp);

    ttxclkesc = tdsi_fclk * lp_clk_div;

    lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
            26) / 16;

    return max(lp_inter, 0);
}
static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    int blanking_mode;
    int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
    int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
    int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
    int tclk_trail, ths_exit, exiths_clk;
    bool ddr_alwon;
    struct omap_video_timings *timings = &dsi->timings;
    int bpp = dsi_get_pixel_size(dsi->pix_fmt);
    int ndl = dsi->num_lanes_used - 1;
    int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.regm_dsi + 1;
    int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
    int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
    int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
    int bl_interleave_hs = 0, bl_interleave_lp = 0;
    u32 r;

    r = dsi_read_reg(dsidev, DSI_CTRL);
    blanking_mode = FLD_GET(r, 20, 20);
    hfp_blanking_mode = FLD_GET(r, 21, 21);
    hbp_blanking_mode = FLD_GET(r, 22, 22);
    hsa_blanking_mode = FLD_GET(r, 23, 23);

    r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
    hbp = FLD_GET(r, 11, 0);
    hfp = FLD_GET(r, 23, 12);
    hsa = FLD_GET(r, 31, 24);

    r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
    ddr_clk_post = FLD_GET(r, 7, 0);
    ddr_clk_pre = FLD_GET(r, 15, 8);

    r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
    exit_hs_mode_lat = FLD_GET(r, 15, 0);
    enter_hs_mode_lat = FLD_GET(r, 31, 16);

    r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
    lp_clk_div = FLD_GET(r, 12, 0);
    ddr_alwon = FLD_GET(r, 13, 13);

    r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
    ths_exit = FLD_GET(r, 7, 0);

    r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
    tclk_trail = FLD_GET(r, 15, 8);

    exiths_clk = ths_exit + tclk_trail;

    width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
    bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);

    if (!hsa_blanking_mode) {
        hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
                enter_hs_mode_lat, exit_hs_mode_lat,
                exiths_clk, ddr_clk_pre, ddr_clk_post);
        hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
                enter_hs_mode_lat, exit_hs_mode_lat,
                lp_clk_div, dsi_fclk_hsdiv);
    }

    if (!hfp_blanking_mode) {
        hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
                enter_hs_mode_lat, exit_hs_mode_lat,
                exiths_clk, ddr_clk_pre, ddr_clk_post);
        hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
                enter_hs_mode_lat, exit_hs_mode_lat,
                lp_clk_div, dsi_fclk_hsdiv);
    }

    if (!hbp_blanking_mode) {
        hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
                enter_hs_mode_lat, exit_hs_mode_lat,
                exiths_clk, ddr_clk_pre, ddr_clk_post);
        hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
                enter_hs_mode_lat, exit_hs_mode_lat,
                lp_clk_div, dsi_fclk_hsdiv);
    }

    if (!blanking_mode) {
        bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
                enter_hs_mode_lat, exit_hs_mode_lat,
                exiths_clk, ddr_clk_pre, ddr_clk_post);
        bl_interleave_lp = dsi_compute_interleave_lp(bllp,
                enter_hs_mode_lat, exit_hs_mode_lat,
                lp_clk_div, dsi_fclk_hsdiv);
    }

    DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
            hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
            bl_interleave_hs);

    DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
            hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
            bl_interleave_lp);

    r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
    r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
    r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
    r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
    dsi_write_reg(dsidev, DSI_VM_TIMING4, r);

    r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
    r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
    r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
    r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
    dsi_write_reg(dsidev, DSI_VM_TIMING5, r);

    r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
    r = FLD_MOD(r, bl_interleave_hs, 31, 15);
    r = FLD_MOD(r, bl_interleave_lp, 16, 0);
    dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
}
static int dsi_proto_config(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u32 r;
    int buswidth = 0;

    dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
            DSI_FIFO_SIZE_32,
            DSI_FIFO_SIZE_32,
            DSI_FIFO_SIZE_32);

    dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
            DSI_FIFO_SIZE_32,
            DSI_FIFO_SIZE_32,
            DSI_FIFO_SIZE_32);

    /* XXX what values for the timeouts? */
    dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
    dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
    dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
    dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);

    switch (dsi_get_pixel_size(dsi->pix_fmt)) {
    case 16:
        buswidth = 0;
        break;
    case 18:
        buswidth = 1;
        break;
    case 24:
        buswidth = 2;
        break;
    default:
        BUG();
        return -EINVAL;
    }

    r = dsi_read_reg(dsidev, DSI_CTRL);
    r = FLD_MOD(r, 1, 1, 1);    /* CS_RX_EN */
    r = FLD_MOD(r, 1, 2, 2);    /* ECC_RX_EN */
    r = FLD_MOD(r, 1, 3, 3);    /* TX_FIFO_ARBITRATION */
    r = FLD_MOD(r, 1, 4, 4);    /* VP_CLK_RATIO, always 1, see errata*/
    r = FLD_MOD(r, buswidth, 7, 6);     /* VP_DATA_BUS_WIDTH */
    r = FLD_MOD(r, 0, 8, 8);    /* VP_CLK_POL */
    r = FLD_MOD(r, 1, 14, 14);  /* TRIGGER_RESET_MODE */
    r = FLD_MOD(r, 1, 19, 19);  /* EOT_ENABLE */
    if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
        r = FLD_MOD(r, 1, 24, 24);      /* DCS_CMD_ENABLE */
        /* DCS_CMD_CODE, 1=start, 0=continue */
        r = FLD_MOD(r, 0, 25, 25);
    }

    dsi_write_reg(dsidev, DSI_CTRL, r);

    dsi_config_vp_num_line_buffers(dsidev);

    if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
        dsi_config_vp_sync_events(dsidev);
        dsi_config_blanking_modes(dsidev);
        dsi_config_cmd_mode_interleaving(dsidev);
    }

    dsi_vc_initial_config(dsidev, 0);
    dsi_vc_initial_config(dsidev, 1);
    dsi_vc_initial_config(dsidev, 2);
    dsi_vc_initial_config(dsidev, 3);

    return 0;
}
static void dsi_proto_timings(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
    unsigned tclk_pre, tclk_post;
    unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
    unsigned ths_trail, ths_exit;
    unsigned ddr_clk_pre, ddr_clk_post;
    unsigned enter_hs_mode_lat, exit_hs_mode_lat;
    unsigned ths_eot;
    int ndl = dsi->num_lanes_used - 1;
    u32 r;

    r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
    ths_prepare = FLD_GET(r, 31, 24);
    ths_prepare_ths_zero = FLD_GET(r, 23, 16);
    ths_zero = ths_prepare_ths_zero - ths_prepare;
    ths_trail = FLD_GET(r, 15, 8);
    ths_exit = FLD_GET(r, 7, 0);

    r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
    tlpx = FLD_GET(r, 20, 16) * 2;
    tclk_trail = FLD_GET(r, 15, 8);
    tclk_zero = FLD_GET(r, 7, 0);

    r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
    tclk_prepare = FLD_GET(r, 7, 0);

    /* min 8*UI */
    tclk_pre = 20;
    /* min 60ns + 52*UI */
    tclk_post = ns2ddr(dsidev, 60) + 26;

    ths_eot = DIV_ROUND_UP(4, ndl);

    ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
            4);
    ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;

    BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
    BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);

    r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
    r = FLD_MOD(r, ddr_clk_pre, 15, 8);
    r = FLD_MOD(r, ddr_clk_post, 7, 0);
    dsi_write_reg(dsidev, DSI_CLK_TIMING, r);

    DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
            ddr_clk_pre, ddr_clk_post);

    enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
            DIV_ROUND_UP(ths_prepare, 4) +
            DIV_ROUND_UP(ths_zero + 3, 4);

    exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;

    r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
            FLD_VAL(exit_hs_mode_lat, 15, 0);
    dsi_write_reg(dsidev, DSI_VM_TIMING7, r);

    DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
            enter_hs_mode_lat, exit_hs_mode_lat);

    if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
        /* TODO: Implement a video mode check_timings function */
        int hsa = dsi->vm_timings.hsa;
        int hfp = dsi->vm_timings.hfp;
        int hbp = dsi->vm_timings.hbp;
        int vsa = dsi->vm_timings.vsa;
        int vfp = dsi->vm_timings.vfp;
        int vbp = dsi->vm_timings.vbp;
        int window_sync = dsi->vm_timings.window_sync;
        bool hsync_end = dsi->vm_timings.vp_hsync_end;
        struct omap_video_timings *timings = &dsi->timings;
        int bpp = dsi_get_pixel_size(dsi->pix_fmt);
        int tl, t_he, width_bytes;

        t_he = hsync_end ?
            ((hsa == 0 && ndl == 3) ? 1 : DIV_ROUND_UP(4, ndl)) : 0;

        width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);

        /* TL = t_HS + HSA + t_HE + HFP + ceil((WC + 6) / NDL) + HBP */
        tl = DIV_ROUND_UP(4, ndl) + (hsync_end ? hsa : 0) + t_he + hfp +
                DIV_ROUND_UP(width_bytes + 6, ndl) + hbp;
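        /*
         * Worked example (illustrative values): x_res = 864 at 24 bpp
         * gives width_bytes = 2592, so with ndl = 2 the payload term is
         * DIV_ROUND_UP(2592 + 6, 2) = 1299 TXBYTECLKHS; HSA/HFP/HBP and
         * the t_HS/t_HE terms are added on top to form TL.
         */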
4188 DSSDBG("HBP: %d, HFP: %d, HSA: %d, TL: %d TXBYTECLKHS\n", hbp,
4189 hfp, hsync_end ? hsa : 0, tl);
4190 DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
4191 vsa, timings->y_res);
4193 r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
4194 r = FLD_MOD(r, hbp, 11, 0); /* HBP */
4195 r = FLD_MOD(r, hfp, 23, 12); /* HFP */
4196 r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */
4197 dsi_write_reg(dsidev, DSI_VM_TIMING1, r);
4199 r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
4200 r = FLD_MOD(r, vbp, 7, 0); /* VBP */
4201 r = FLD_MOD(r, vfp, 15, 8); /* VFP */
4202 r = FLD_MOD(r, vsa, 23, 16); /* VSA */
4203 r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */
4204 dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
4206 r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
4207 r = FLD_MOD(r, timings->y_res, 14, 0); /* VACT */
4208 r = FLD_MOD(r, tl, 31, 16); /* TL */
4209 dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
        const struct omap_dsi_pin_config *pin_cfg)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    int num_pins;
    const int *pins;
    struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
    int num_lanes;
    int i;

    static const enum dsi_lane_function functions[] = {
        DSI_LANE_CLK,
        DSI_LANE_DATA1,
        DSI_LANE_DATA2,
        DSI_LANE_DATA3,
        DSI_LANE_DATA4,
    };

    num_pins = pin_cfg->num_pins;
    pins = pin_cfg->pins;

    if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2
            || num_pins % 2 != 0)
        return -EINVAL;

    for (i = 0; i < DSI_MAX_NR_LANES; ++i)
        lanes[i].function = DSI_LANE_UNUSED;

    num_lanes = 0;

    for (i = 0; i < num_pins; i += 2) {
        u8 lane, pol;
        int dx, dy;

        dx = pins[i];
        dy = pins[i + 1];

        if (dx < 0 || dx >= dsi->num_lanes_supported * 2)
            return -EINVAL;

        if (dy < 0 || dy >= dsi->num_lanes_supported * 2)
            return -EINVAL;

        if (dx & 1) {
            if (dy != dx - 1)
                return -EINVAL;
            pol = 1;
        } else {
            if (dy != dx + 1)
                return -EINVAL;
            pol = 0;
        }

        lane = dx / 2;

        lanes[lane].function = functions[i / 2];
        lanes[lane].polarity = pol;
        num_lanes++;
    }

    memcpy(dsi->lanes, lanes, sizeof(dsi->lanes));
    dsi->num_lanes_used = num_lanes;

    return 0;
}
EXPORT_SYMBOL(omapdss_dsi_configure_pins);
static int dsi_set_clocks(struct omap_dss_device *dssdev,
        unsigned long ddr_clk, unsigned long lp_clk)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    struct dsi_clock_info cinfo;
    struct dispc_clock_info dispc_cinfo;
    unsigned lp_clk_div;
    unsigned long dsi_fclk;
    int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
    unsigned long pck;
    int r;

    DSSDBG("Setting DSI clocks: ddr_clk %lu, lp_clk %lu", ddr_clk, lp_clk);

    /* Calculate PLL output clock */
    r = dsi_pll_calc_ddrfreq(dsidev, ddr_clk * 4, &cinfo);
    if (r)
        return r;

    /* Calculate PLL's DSI clock */
    dsi_pll_calc_dsi_fck(dsidev, &cinfo);

    /* Calculate PLL's DISPC clock and pck & lck divs */
    pck = cinfo.clkin4ddr / 16 * (dsi->num_lanes_used - 1) * 8 / bpp;
    DSSDBG("finding dispc dividers for pck %lu\n", pck);
    r = dsi_pll_calc_dispc_fck(dsidev, pck, &cinfo, &dispc_cinfo);
    if (r)
        return r;

    /* Calculate LP clock */
    dsi_fclk = cinfo.dsi_pll_hsdiv_dsi_clk;
    lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk * 2);
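    /*
     * The divisor is rounded up so the resulting LP clock,
     * dsi_fclk / (2 * lp_clk_div), never exceeds the requested lp_clk.
     * Example (illustrative): dsi_fclk = 173 MHz and lp_clk = 10 MHz
     * give lp_clk_div = 9, i.e. an actual LP clock of ~9.6 MHz.
     */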
4315 dsi->user_dsi_cinfo.regn = cinfo.regn;
4316 dsi->user_dsi_cinfo.regm = cinfo.regm;
4317 dsi->user_dsi_cinfo.regm_dispc = cinfo.regm_dispc;
4318 dsi->user_dsi_cinfo.regm_dsi = cinfo.regm_dsi;
4320 dsi->user_dsi_cinfo.lp_clk_div = lp_clk_div;
4322 dsi->user_dispc_cinfo.lck_div = dispc_cinfo.lck_div;
4323 dsi->user_dispc_cinfo.pck_div = dispc_cinfo.pck_div;
4325 dsi->user_dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK;
4327 dsi->user_lcd_clk_src =
4328 dsi->module_id == 0 ?
4329 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
4330 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
4332 dsi->user_dsi_fclk_src =
4333 dsi->module_id == 0 ?
4334 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
4335 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI;
int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    struct omap_overlay_manager *mgr = dsi->output.manager;
    int bpp = dsi_get_pixel_size(dsi->pix_fmt);
    struct omap_dss_output *out = &dsi->output;
    u8 data_type;
    u16 word_count;
    int r;

    if (out == NULL || out->manager == NULL) {
        DSSERR("failed to enable display: no output/manager\n");
        return -ENODEV;
    }

    r = dsi_display_init_dispc(dsidev, mgr);
    if (r)
        goto err_init_dispc;

    if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
        switch (dsi->pix_fmt) {
        case OMAP_DSS_DSI_FMT_RGB888:
            data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
            break;
        case OMAP_DSS_DSI_FMT_RGB666:
            data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
            break;
        case OMAP_DSS_DSI_FMT_RGB666_PACKED:
            data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
            break;
        case OMAP_DSS_DSI_FMT_RGB565:
            data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
            break;
        default:
            r = -EINVAL;
            goto err_pix_fmt;
        }

        dsi_if_enable(dsidev, false);
        dsi_vc_enable(dsidev, channel, false);

        /* MODE, 1 = video mode */
        REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);

        word_count = DIV_ROUND_UP(dsi->timings.x_res * bpp, 8);

        dsi_vc_write_long_header(dsidev, channel, data_type,
                word_count, 0);

        dsi_vc_enable(dsidev, channel, true);
        dsi_if_enable(dsidev, true);
    }

    r = dss_mgr_enable(mgr);
    if (r)
        goto err_mgr_enable;

    return 0;

err_mgr_enable:
    if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
        dsi_if_enable(dsidev, false);
        dsi_vc_enable(dsidev, channel, false);
    }
err_pix_fmt:
    dsi_display_uninit_dispc(dsidev, mgr);
err_init_dispc:
    return r;
}
EXPORT_SYMBOL(dsi_enable_video_output);
void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    struct omap_overlay_manager *mgr = dsi->output.manager;

    if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
        dsi_if_enable(dsidev, false);
        dsi_vc_enable(dsidev, channel, false);

        /* MODE, 0 = command mode */
        REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);

        dsi_vc_enable(dsidev, channel, true);
        dsi_if_enable(dsidev, true);
    }

    dss_mgr_disable(mgr);

    dsi_display_uninit_dispc(dsidev, mgr);
}
EXPORT_SYMBOL(dsi_disable_video_output);
static void dsi_update_screen_dispc(struct platform_device *dsidev)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    struct omap_overlay_manager *mgr = dsi->output.manager;
    unsigned bytespp;
    unsigned bytespl;
    unsigned bytespf;
    unsigned total_len;
    unsigned packet_payload;
    unsigned packet_len;
    u32 l;
    int r;
    const unsigned channel = dsi->update_channel;
    const unsigned line_buf_size = dsi->line_buffer_size;
    u16 w = dsi->timings.x_res;
    u16 h = dsi->timings.y_res;

    DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);

    dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);

    bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
    bytespl = w * bytespp;
    bytespf = bytespl * h;

    /* NOTE: packet_payload has to be equal to N * bytespl, where N is
     * number of lines in a packet. See errata about VP_CLK_RATIO */

    if (bytespf < line_buf_size)
        packet_payload = bytespf;
    else
        packet_payload = (line_buf_size) / bytespl * bytespl;

    packet_len = packet_payload + 1;    /* 1 byte for DCS cmd */
    total_len = (bytespf / packet_payload) * packet_len;

    if (bytespf % packet_payload)
        total_len += (bytespf % packet_payload) + 1;
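    /*
     * Worked example (illustrative values): a 320x240 panel at 16 bpp
     * has bytespl = 640 and bytespf = 153600. With line_buf_size = 1024
     * the payload becomes 1024 / 640 * 640 = 640 bytes (one full line),
     * packet_len = 641, and total_len = (153600 / 640) * 641 = 153840
     * bytes with no remainder packet.
     */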
    l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
    dsi_write_reg(dsidev, DSI_VC_TE(channel), l);

    dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
            packet_len, 0);

    if (dsi->te_enabled)
        l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
    else
        l = FLD_MOD(l, 1, 31, 31); /* TE_START */
    dsi_write_reg(dsidev, DSI_VC_TE(channel), l);

    /* We put SIDLEMODE to no-idle for the duration of the transfer,
     * because DSS interrupts are not capable of waking up the CPU and the
     * framedone interrupt could be delayed for quite a long time. I think
     * the same goes for any DSS interrupts, but for some reason I have not
     * seen the problem anywhere else than here.
     */
    dispc_disable_sidle();

    dsi_perf_mark_start(dsidev);

    r = schedule_delayed_work(&dsi->framedone_timeout_work,
            msecs_to_jiffies(250));
    BUG_ON(r == 0);

    dss_mgr_set_timings(mgr, &dsi->timings);

    dss_mgr_start_update(mgr);

    if (dsi->te_enabled) {
        /* disable LP_RX_TO, so that we can receive TE. Time to wait
         * for TE is longer than the timer allows */
        REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */

        dsi_vc_send_bta(dsidev, channel);

#ifdef DSI_CATCH_MISSING_TE
        mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
#endif
    }
}

#ifdef DSI_CATCH_MISSING_TE
static void dsi_te_timeout(unsigned long arg)
{
    DSSERR("TE not received for 250ms!\n");
}
#endif
static void dsi_handle_framedone(struct platform_device *dsidev, int error)
{
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

    /* SIDLEMODE back to smart-idle */
    dispc_enable_sidle();

    if (dsi->te_enabled) {
        /* enable LP_RX_TO again after the TE */
        REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
    }

    dsi->framedone_callback(error, dsi->framedone_data);

    if (!error)
        dsi_perf_show(dsidev, "DISPC");
}
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
{
    struct dsi_data *dsi = container_of(work, struct dsi_data,
            framedone_timeout_work.work);
    /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
     * 250ms which would conflict with this timeout work. What should be
     * done is first cancel the transfer on the HW, and then cancel the
     * possibly scheduled framedone work. However, cancelling the transfer
     * on the HW is buggy, and would probably require resetting the whole
     * DSI */

    DSSERR("Framedone not received for 250ms!\n");

    dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
}
static void dsi_framedone_irq_callback(void *data)
{
    struct platform_device *dsidev = (struct platform_device *) data;
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

    /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
     * turns itself off. However, DSI still has the pixels in its buffers,
     * and is sending the data.
     */

    cancel_delayed_work(&dsi->framedone_timeout_work);

    dsi_handle_framedone(dsidev, 0);
}
int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
        void (*callback)(int, void *), void *data)
{
    struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
    struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
    u16 dw, dh;

    dsi_perf_mark_setup(dsidev);

    dsi->update_channel = channel;

    dsi->framedone_callback = callback;
    dsi->framedone_data = data;

    dw = dsi->timings.x_res;
    dh = dsi->timings.y_res;

    dsi->update_bytes = dw * dh *
            dsi_get_pixel_size(dsi->pix_fmt) / 8;

    dsi_update_screen_dispc(dsidev);

    return 0;
}
EXPORT_SYMBOL(omap_dsi_update);
static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dispc_clock_info dispc_cinfo;
	int r;
	unsigned long fck;

	fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);

	dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
	dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;

	r = dispc_calc_clock_rates(fck, &dispc_cinfo);
	if (r) {
		DSSERR("Failed to calc dispc clocks\n");
		return r;
	}

	dsi->mgr_config.clock_info = dispc_cinfo;

	return 0;
}
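/*
 * The rates follow the usual DISPC divider chain: with fck taken from the
 * DSI PLL HSDIV output, lck = fck / lck_div and pck = lck / pck_div.
 * Nothing is programmed into DISPC here; the validated clock_info is only
 * applied via dss_mgr_set_lcd_config() in dsi_display_init_dispc() below.
 */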
static int dsi_display_init_dispc(struct platform_device *dsidev,
		struct omap_overlay_manager *mgr)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	dss_select_lcd_clk_source(mgr->id, dsi->user_lcd_clk_src);

	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
		dsi->timings.hsw = 1;
		dsi->timings.hfp = 1;
		dsi->timings.hbp = 1;
		dsi->timings.vsw = 1;
		dsi->timings.vfp = 0;
		dsi->timings.vbp = 0;

		r = dss_mgr_register_framedone_handler(mgr,
				dsi_framedone_irq_callback, dsidev);
		if (r) {
			DSSERR("can't register FRAMEDONE handler\n");
			goto err;
		}

		dsi->mgr_config.stallmode = true;
		dsi->mgr_config.fifohandcheck = true;
	} else {
		dsi->mgr_config.stallmode = false;
		dsi->mgr_config.fifohandcheck = false;
	}

	/*
	 * override interlace, logic level and edge related parameters in
	 * omap_video_timings with default values
	 */
	dsi->timings.interlace = false;
	dsi->timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
	dsi->timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
	dsi->timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
	dsi->timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
	dsi->timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;

	dss_mgr_set_timings(mgr, &dsi->timings);

	r = dsi_configure_dispc_clocks(dsidev);
	if (r)
		goto err1;

	dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
	dsi->mgr_config.video_port_width =
			dsi_get_pixel_size(dsi->pix_fmt);
	dsi->mgr_config.lcden_sig_polarity = 0;

	dss_mgr_set_lcd_config(mgr, &dsi->mgr_config);

	return 0;
err1:
	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
		dss_mgr_unregister_framedone_handler(mgr,
				dsi_framedone_irq_callback, dsidev);
err:
	dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
	return r;
}
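/*
 * In command mode DISPC is put in stall mode with FIFO handcheck enabled,
 * so pixels are pushed only when the DSI block requests them and the
 * one-pixel sync/porch values above are dummies. In video mode DISPC
 * free-runs with the timings provided through omapdss_dsi_set_config().
 */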
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
		struct omap_overlay_manager *mgr)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
		dss_mgr_unregister_framedone_handler(mgr,
				dsi_framedone_irq_callback, dsidev);

	dss_select_lcd_clk_source(mgr->id, OMAP_DSS_CLK_SRC_FCK);
}
static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info cinfo;
	int r;

	cinfo = dsi->user_dsi_cinfo;

	r = dsi_calc_clock_rates(dsidev, &cinfo);
	if (r) {
		DSSERR("Failed to calc dsi clocks\n");
		return r;
	}

	r = dsi_pll_set_clock_div(dsidev, &cinfo);
	if (r) {
		DSSERR("Failed to set dsi clocks\n");
		return r;
	}

	return 0;
}
static int dsi_display_init_dsi(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	r = dsi_pll_init(dsidev, true, true);
	if (r)
		goto err0;

	r = dsi_configure_dsi_clocks(dsidev);
	if (r)
		goto err1;

	dss_select_dsi_clk_source(dsi->module_id, dsi->user_dsi_fclk_src);

	r = dsi_cio_init(dsidev);
	if (r)
		goto err2;

	_dsi_print_reset_status(dsidev);

	dsi_proto_timings(dsidev);
	dsi_set_lp_clk_divisor(dsidev);
	_dsi_print_reset_status(dsidev);

	r = dsi_proto_config(dsidev);
	if (r)
		goto err3;

	/* enable interface */
	dsi_vc_enable(dsidev, 0, 1);
	dsi_vc_enable(dsidev, 1, 1);
	dsi_vc_enable(dsidev, 2, 1);
	dsi_vc_enable(dsidev, 3, 1);
	dsi_if_enable(dsidev, 1);
	dsi_force_tx_stop_mode_io(dsidev);

	return 0;
err3:
	dsi_cio_uninit(dsidev);
err2:
	dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
err1:
	dsi_pll_uninit(dsidev, true);
err0:
	return r;
}
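/*
 * The error labels unwind exactly the steps that succeeded before the
 * failure (CIO, then the DSI functional clock source, then the PLL),
 * mirroring the init order in reverse.
 */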
static void dsi_display_uninit_dsi(struct platform_device *dsidev,
		bool disconnect_lanes, bool enter_ulps)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (enter_ulps && !dsi->ulps_enabled)
		dsi_enter_ulps(dsidev);

	/* disable interface */
	dsi_if_enable(dsidev, 0);
	dsi_vc_enable(dsidev, 0, 0);
	dsi_vc_enable(dsidev, 1, 0);
	dsi_vc_enable(dsidev, 2, 0);
	dsi_vc_enable(dsidev, 3, 0);

	dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
	dsi_cio_uninit(dsidev);
	dsi_pll_uninit(dsidev, disconnect_lanes);
}
int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	DSSDBG("dsi_display_enable\n");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	mutex_lock(&dsi->lock);

	r = omap_dss_start_device(dssdev);
	if (r) {
		DSSERR("failed to start device\n");
		goto err_start_dev;
	}

	r = dsi_runtime_get(dsidev);
	if (r)
		goto err_get_dsi;

	dsi_enable_pll_clock(dsidev, 1);

	_dsi_initialize_irq(dsidev);

	r = dsi_display_init_dsi(dsidev);
	if (r)
		goto err_init_dsi;

	mutex_unlock(&dsi->lock);

	return 0;

err_init_dsi:
	dsi_enable_pll_clock(dsidev, 0);
	dsi_runtime_put(dsidev);
err_get_dsi:
	omap_dss_stop_device(dssdev);
err_start_dev:
	mutex_unlock(&dsi->lock);
	DSSDBG("dsi_display_enable FAILED\n");
	return r;
}
EXPORT_SYMBOL(omapdss_dsi_display_enable);
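/*
 * Enable sequence in short: runtime PM powers the module, the PLL clock is
 * gated on, the IRQ state is (re)initialized, and dsi_display_init_dsi()
 * performs the PLL/CIO/protocol bring-up. Callers must already hold the
 * DSI bus lock, which the WARN_ON above asserts.
 */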
void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
		bool disconnect_lanes, bool enter_ulps)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("dsi_display_disable\n");

	WARN_ON(!dsi_bus_is_locked(dsidev));

	mutex_lock(&dsi->lock);

	dsi_sync_vc(dsidev, 0);
	dsi_sync_vc(dsidev, 1);
	dsi_sync_vc(dsidev, 2);
	dsi_sync_vc(dsidev, 3);

	dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);

	dsi_runtime_put(dsidev);
	dsi_enable_pll_clock(dsidev, 0);

	omap_dss_stop_device(dssdev);

	mutex_unlock(&dsi->lock);
}
EXPORT_SYMBOL(omapdss_dsi_display_disable);
int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi->te_enabled = enable;
	return 0;
}
EXPORT_SYMBOL(omapdss_dsi_enable_te);
int omapdss_dsi_set_config(struct omap_dss_device *dssdev,
		const struct omap_dss_dsi_config *config)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	mutex_lock(&dsi->lock);

	dsi->timings = *config->timings;
	dsi->vm_timings = *config->vm_timings;
	dsi->pix_fmt = config->pixel_format;
	dsi->mode = config->mode;

	dsi_set_clocks(dssdev, config->hs_clk, config->lp_clk);

	mutex_unlock(&dsi->lock);

	return 0;
}
EXPORT_SYMBOL(omapdss_dsi_set_config);
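/*
 * The config is only cached here; the stored timings, pixel format and
 * operation mode are consumed later by the enable path
 * (dsi_display_init_dsi() / dsi_display_init_dispc()), so this has to be
 * called before omapdss_dsi_display_enable().
 */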
/*
 * Return a hardcoded channel for the DSI output. This should work for
 * current use cases, but this can be later expanded to either resolve
 * the channel in some more dynamic manner, or get the channel as a user
 * parameter.
 */
static enum omap_channel dsi_get_channel(int module_id)
{
	switch (omapdss_get_version()) {
	case OMAPDSS_VER_OMAP24xx:
		DSSWARN("DSI not supported\n");
		return OMAP_DSS_CHANNEL_LCD;

	case OMAPDSS_VER_OMAP34xx_ES1:
	case OMAPDSS_VER_OMAP34xx_ES3:
	case OMAPDSS_VER_OMAP3630:
	case OMAPDSS_VER_AM35xx:
		return OMAP_DSS_CHANNEL_LCD;

	case OMAPDSS_VER_OMAP4430_ES1:
	case OMAPDSS_VER_OMAP4430_ES2:
	case OMAPDSS_VER_OMAP4:
		switch (module_id) {
		case 0:
			return OMAP_DSS_CHANNEL_LCD;
		case 1:
			return OMAP_DSS_CHANNEL_LCD2;
		default:
			DSSWARN("unsupported module id\n");
			return OMAP_DSS_CHANNEL_LCD;
		}

	case OMAPDSS_VER_OMAP5:
		switch (module_id) {
		case 0:
			return OMAP_DSS_CHANNEL_LCD;
		case 1:
			return OMAP_DSS_CHANNEL_LCD3;
		default:
			DSSWARN("unsupported module id\n");
			return OMAP_DSS_CHANNEL_LCD;
		}

	default:
		DSSWARN("unsupported DSS version\n");
		return OMAP_DSS_CHANNEL_LCD;
	}
}
static int __init dsi_init_display(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev =
			dsi_get_dsidev_from_id(dssdev->phy.dsi.module);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("DSI init\n");

	if (dsi->vdds_dsi_reg == NULL) {
		struct regulator *vdds_dsi;

		vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");

		/* DT HACK: try VCXIO to make omapdss work for o4 sdp/panda */
		if (IS_ERR(vdds_dsi))
			vdds_dsi = regulator_get(&dsi->pdev->dev, "VCXIO");

		if (IS_ERR(vdds_dsi)) {
			DSSERR("can't get VDDS_DSI regulator\n");
			return PTR_ERR(vdds_dsi);
		}

		dsi->vdds_dsi_reg = vdds_dsi;
	}

	return 0;
}
int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
		if (!dsi->vc[i].dssdev) {
			dsi->vc[i].dssdev = dssdev;
			*channel = i;
			return 0;
		}
	}

	DSSERR("cannot get VC for display %s\n", dssdev->name);
	return -ENOSPC;
}
EXPORT_SYMBOL(omap_dsi_request_vc);
int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (vc_id < 0 || vc_id > 3) {
		DSSERR("VC ID out of range\n");
		return -EINVAL;
	}

	if (channel < 0 || channel > 3) {
		DSSERR("Virtual Channel out of range\n");
		return -EINVAL;
	}

	if (dsi->vc[channel].dssdev != dssdev) {
		DSSERR("Virtual Channel not allocated to display %s\n",
			dssdev->name);
		return -EINVAL;
	}

	dsi->vc[channel].vc_id = vc_id;

	return 0;
}
EXPORT_SYMBOL(omap_dsi_set_vc_id);
void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if ((channel >= 0 && channel <= 3) &&
		dsi->vc[channel].dssdev == dssdev) {
		dsi->vc[channel].dssdev = NULL;
		dsi->vc[channel].vc_id = 0;
	}
}
EXPORT_SYMBOL(omap_dsi_release_vc);
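/*
 * Illustrative VC lifecycle from the panel driver's side (a sketch; the
 * variable names are made up for the example):
 *
 *	int channel;
 *
 *	r = omap_dsi_request_vc(dssdev, &channel);
 *	r = omap_dsi_set_vc_id(dssdev, channel, 0);
 *	// ...use the VC for commands and updates...
 *	omap_dsi_release_vc(dssdev, channel);
 *
 * "channel" selects one of the four HW virtual channels, while "vc_id" is
 * the virtual channel ID carried in the DSI packet headers.
 */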
void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
{
	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 1)
		DSSERR("%s (%s) not active\n",
			dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
			dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC));
}

void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev)
{
	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1)
		DSSERR("%s (%s) not active\n",
			dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
			dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI));
}
static void dsi_calc_clock_param_ranges(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN);
	dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM);
	dsi->regm_dispc_max =
		dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC);
	dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI);
	dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT);
	dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT);
	dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV);
}
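/*
 * These limits bound the DSI PLL parameters used by the clock calculation
 * code earlier in this file; roughly, fint = clkin / regn must stay within
 * [fint_min, fint_max], the PLL output clock is 2 * regm * fint, and the
 * HSDIV outputs divide that clock by regm_dispc / regm_dsi.
 */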
static int dsi_get_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct clk *clk;

	clk = clk_get(&dsidev->dev, "fck");
	if (IS_ERR(clk)) {
		DSSERR("can't get fck\n");
		return PTR_ERR(clk);
	}

	dsi->dss_clk = clk;

	clk = clk_get(&dsidev->dev, "sys_clk");
	if (IS_ERR(clk)) {
		DSSERR("can't get sys_clk\n");
		clk_put(dsi->dss_clk);
		dsi->dss_clk = NULL;
		return PTR_ERR(clk);
	}

	dsi->sys_clk = clk;

	return 0;
}
static void dsi_put_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->dss_clk)
		clk_put(dsi->dss_clk);
	if (dsi->sys_clk)
		clk_put(dsi->sys_clk);
}
static struct omap_dss_device * __init dsi_find_dssdev(struct platform_device *pdev)
{
	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
	struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
	const char *def_disp_name = omapdss_get_default_display_name();
	struct omap_dss_device *def_dssdev;
	int i;

	def_dssdev = NULL;

	for (i = 0; i < pdata->num_devices; ++i) {
		struct omap_dss_device *dssdev = pdata->devices[i];

		if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
			continue;

		if (dssdev->phy.dsi.module != dsi->module_id)
			continue;

		if (def_dssdev == NULL)
			def_dssdev = dssdev;

		if (def_disp_name != NULL &&
				strcmp(dssdev->name, def_disp_name) == 0) {
			def_dssdev = dssdev;
			break;
		}
	}

	return def_dssdev;
}
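/*
 * Selection policy: the first DSI device that belongs to this module acts
 * as the default, but a device matching the name reported by
 * omapdss_get_default_display_name() takes precedence.
 */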
static void __init dsi_probe_pdata(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_dss_device *plat_dssdev;
	struct omap_dss_device *dssdev;
	int r;

	plat_dssdev = dsi_find_dssdev(dsidev);
	if (!plat_dssdev)
		return;

	dssdev = dss_alloc_and_init_device(&dsidev->dev);
	if (!dssdev)
		return;

	dss_copy_device_pdata(dssdev, plat_dssdev);

	r = dsi_init_display(dssdev);
	if (r) {
		DSSERR("device %s init failed: %d\n", dssdev->name, r);
		dss_put_device(dssdev);
		return;
	}

	r = omapdss_output_set_device(&dsi->output, dssdev);
	if (r) {
		DSSERR("failed to connect output to new device: %s\n",
				dssdev->name);
		dss_put_device(dssdev);
		return;
	}

	r = dss_add_device(dssdev);
	if (r) {
		DSSERR("device %s register failed: %d\n", dssdev->name, r);
		omapdss_output_unset_device(&dsi->output);
		dss_put_device(dssdev);
		return;
	}
}
static void __init dsi_init_output(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_dss_output *out = &dsi->output;

	out->pdev = dsidev;
	out->id = dsi->module_id == 0 ?
			OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
	out->type = OMAP_DISPLAY_TYPE_DSI;
	out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
	out->dispc_channel = dsi_get_channel(dsi->module_id);

	dss_register_output(out);
}
static void __exit dsi_uninit_output(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct omap_dss_output *out = &dsi->output;

	dss_unregister_output(out);
}
/* DSI1 HW IP initialisation */
static int __init omap_dsihw_probe(struct platform_device *dsidev)
{
	u32 rev;
	int r, i;
	struct resource *dsi_mem;
	struct dsi_data *dsi;

	dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	dsi->module_id = dsidev->id;
	dsi->pdev = dsidev;
	dev_set_drvdata(&dsidev->dev, dsi);

	spin_lock_init(&dsi->irq_lock);
	spin_lock_init(&dsi->errors_lock);
	dsi->errors = 0;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spin_lock_init(&dsi->irq_stats_lock);
	dsi->irq_stats.last_reset = jiffies;
#endif

	mutex_init(&dsi->lock);
	sema_init(&dsi->bus_lock, 1);

	INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
			dsi_framedone_timeout_work_callback);

#ifdef DSI_CATCH_MISSING_TE
	init_timer(&dsi->te_timer);
	dsi->te_timer.function = dsi_te_timeout;
	dsi->te_timer.data = 0;
#endif

	dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
	if (!dsi_mem) {
		DSSERR("can't get IORESOURCE_MEM DSI\n");
		return -EINVAL;
	}

	dsi->base = devm_ioremap(&dsidev->dev, dsi_mem->start,
			resource_size(dsi_mem));
	if (!dsi->base) {
		DSSERR("can't ioremap DSI\n");
		return -ENOMEM;
	}

	dsi->irq = platform_get_irq(dsi->pdev, 0);
	if (dsi->irq < 0) {
		DSSERR("platform_get_irq failed\n");
		return -ENODEV;
	}

	r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler,
			IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev);
	if (r < 0) {
		DSSERR("request_irq failed\n");
		return r;
	}

	/* DSI VCs initialization */
	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
		dsi->vc[i].source = DSI_VC_SOURCE_L4;
		dsi->vc[i].dssdev = NULL;
		dsi->vc[i].vc_id = 0;
	}

	dsi_calc_clock_param_ranges(dsidev);

	r = dsi_get_clocks(dsidev);
	if (r)
		return r;

	pm_runtime_enable(&dsidev->dev);

	r = dsi_runtime_get(dsidev);
	if (r)
		goto err_runtime_get;

	rev = dsi_read_reg(dsidev, DSI_REVISION);
	dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
		FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));

	/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
	 * of data lines to 3 by default */
	if (dss_has_feature(FEAT_DSI_GNQ))
		/* NB_DATA_LANES */
		dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
	else
		dsi->num_lanes_supported = 3;

	dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);

	dsi_init_output(dsidev);

	dsi_probe_pdata(dsidev);

	dsi_runtime_put(dsidev);

	if (dsi->module_id == 0)
		dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
	else if (dsi->module_id == 1)
		dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	if (dsi->module_id == 0)
		dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
	else if (dsi->module_id == 1)
		dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
#endif

	return 0;

err_runtime_get:
	pm_runtime_disable(&dsidev->dev);
	dsi_put_clocks(dsidev);
	return r;
}
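/*
 * Probe keeps the module powered only around the register accesses that
 * need it: dsi_runtime_get() precedes the DSI_REVISION/DSI_GNQ reads and
 * the line buffer size query, and dsi_runtime_put() follows them.
 */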
static int __exit omap_dsihw_remove(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	WARN_ON(dsi->scp_clk_refcount > 0);

	dss_unregister_child_devices(&dsidev->dev);

	dsi_uninit_output(dsidev);

	pm_runtime_disable(&dsidev->dev);

	dsi_put_clocks(dsidev);

	if (dsi->vdds_dsi_reg != NULL) {
		if (dsi->vdds_dsi_enabled) {
			regulator_disable(dsi->vdds_dsi_reg);
			dsi->vdds_dsi_enabled = false;
		}

		regulator_put(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_reg = NULL;
	}

	return 0;
}
static int dsi_runtime_suspend(struct device *dev)
{
	dispc_runtime_put();

	return 0;
}

static int dsi_runtime_resume(struct device *dev)
{
	int r;

	r = dispc_runtime_get();
	if (r)
		return r;

	return 0;
}

static const struct dev_pm_ops dsi_pm_ops = {
	.runtime_suspend = dsi_runtime_suspend,
	.runtime_resume = dsi_runtime_resume,
};
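/*
 * DSI's runtime PM piggy-backs on DISPC's: resuming DSI takes a DISPC
 * runtime reference and suspending releases it, as the DSI output cannot
 * operate without DISPC driving pixels to it.
 */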
static struct platform_driver omap_dsihw_driver = {
	.remove = __exit_p(omap_dsihw_remove),
	.driver = {
		.name = "omapdss_dsi",
		.owner = THIS_MODULE,
		.pm = &dsi_pm_ops,
	},
};
int __init dsi_init_platform_driver(void)
{
	return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
}

void __exit dsi_uninit_platform_driver(void)
{
	platform_driver_unregister(&omap_dsihw_driver);
}