// SPDX-License-Identifier: GPL-2.0+
//
//  Copyright (C) 2000-2001 Deep Blue Solutions
//  Copyright (C) 2002 Shane Nay (shane@minirl.com)
//  Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
//  Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

/*
 * There are 4 versions of the timer hardware on Freescale MXC hardware.
 *  - MX1/MXL
 *  - MX21, MX27.
 *  - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
 *  - MX6DL, MX6SX, MX6Q(rev1.1+)
 */
enum imx_gpt_type {
	GPT_TYPE_IMX1,		/* i.MX1 */
	GPT_TYPE_IMX21,		/* i.MX21/27 */
	GPT_TYPE_IMX31,		/* i.MX31/35/25/37/51/6Q */
	GPT_TYPE_IMX6DL,	/* i.MX6DL/SX/SL */
};

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000

struct imx_timer {
	enum imx_gpt_type type;
	void __iomem *base;
	int irq;
	struct clk *clk_per;
	struct clk *clk_ipg;
	const struct imx_gpt_data *gpt;
	struct clock_event_device ced;
};

struct imx_gpt_data {
	int reg_tstat;
	int reg_tcn;
	int reg_tcmp;
	void (*gpt_setup_tctl)(struct imx_timer *imxtm);
	void (*gpt_irq_enable)(struct imx_timer *imxtm);
	void (*gpt_irq_disable)(struct imx_timer *imxtm);
	void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
	int (*set_next_event)(unsigned long evt,
			      struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
	return container_of(ced, struct imx_timer, ced);
}

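/*
 * The v1 GPT (i.MX1/21/27) gates its compare interrupt with the IRQEN bit
 * in TCTL, while the v2 GPT (i.MX31 and later) has a dedicated interrupt
 * register (IR) and a different status register layout; hence the two
 * families of enable/disable/acknowledge helpers below.
 */
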
static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + V2_IR);
}

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
	writel_relaxed(1 << 0, imxtm->base + V2_IR);
}

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
				imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}

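/*
 * The free-running counter register (TCN) also serves as the raw source
 * for sched_clock() and, on ARM, for the timer-based udelay() loop.
 */
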
static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
	return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
	return readl_relaxed(sched_clock_reg);
}
#endif

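/*
 * Register the GPT counter as a 32-bit, up-counting MMIO clocksource
 * running at the rate of the "per" clock.
 */
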
static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
	unsigned int c = clk_get_rate(imxtm->clk_per);
	void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

#if defined(CONFIG_ARM)
	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);
#endif

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
			clocksource_mmio_readl_up);
}

/* clock event */

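/*
 * Next-event programming: add the requested delta to the current counter
 * value and write the sum into the compare register. If the counter has
 * already run past the compare value when it is re-read, the event was
 * missed and -ETIME tells the clockevents core to retry.
 */
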
static int mx1_2_set_next_event(unsigned long evt,
			      struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

	return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
				-ETIME : 0;
}

static int v2_set_next_event(unsigned long evt,
			      struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + V2_TCMP);

	return evt < 0x7fffffff &&
		(int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
				-ETIME : 0;
}

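/*
 * In both state handlers below the compare register is parked at
 * "tcn - 3", i.e. just behind the free-running counter, so no compare
 * event can fire for almost a full 32-bit wrap while the interrupt is
 * being reconfigured.
 */
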
static int mxc_shutdown(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	u32 tcn;

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
	/* Set event time into far-far future */
	writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

	/* Clear pending interrupt */
	imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	return 0;
}

static int mxc_set_oneshot(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	if (!clockevent_state_oneshot(ced)) {
		u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
		/* Set event time into far-far future */
		writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

		/* Clear pending interrupt */
		imxtm->gpt->gpt_irq_acknowledge(imxtm);
	}

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	/*
	 * Do not put the overhead of interrupt enable/disable into
	 * mxc_set_next_event(); the core has about 4 minutes to call
	 * mxc_set_next_event() or shut the clock down after a mode switch.
	 */
	imxtm->gpt->gpt_irq_enable(imxtm);

	return 0;
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ced = dev_id;
	struct imx_timer *imxtm = to_imx_timer(ced);
	uint32_t tstat;

	tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

	imxtm->gpt->gpt_irq_acknowledge(imxtm);

	ced->event_handler(ced);

	return IRQ_HANDLED;
}

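/*
 * The GPT is a oneshot-only clock_event_device bound to CPU0;
 * CLOCK_EVT_FEAT_DYNIRQ lets the core retarget its interrupt when the
 * device is used as a broadcast timer.
 */
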
static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
	struct clock_event_device *ced = &imxtm->ced;

	ced->name = "mxc_timer1";
	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
	ced->set_state_shutdown = mxc_shutdown;
	ced->set_state_oneshot = mxc_set_oneshot;
	ced->tick_resume = mxc_shutdown;
	ced->set_next_event = imxtm->gpt->set_next_event;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->irq = imxtm->irq;
	clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
					0xff, 0xfffffffe);

	return request_irq(imxtm->irq, mxc_timer_interrupt,
			   IRQF_TIMER | IRQF_IRQPOLL, "i.MX Timer Tick", ced);
}

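/*
 * TCTL setup: every variant runs the counter in free-running (FRR) mode.
 * The v2 timers select either the peripheral clock or the crystal
 * oscillator divided by 8, depending on the rate of the "per" clock
 * provided via the device tree; the i.MX6DL flavour additionally enables
 * the 24 MHz oscillator input with its own prescaler.
 */
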
static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
	else
		tctl_val |= V2_TCTL_CLK_PER;

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
		/* 24 / 8 = 3 MHz */
		writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
		tctl_val |= V2_TCTL_24MEN;
	} else {
		tctl_val |= V2_TCTL_CLK_PER;
	}

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

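/* Per-SoC register offsets and callbacks, selected in _mxc_timer_init(). */
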
static const struct imx_gpt_data imx1_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx31_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

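/*
 * Common init: pick the per-SoC ops, enable the clocks, reset the GPT to
 * a known state and register it as clocksource and clockevent.
 */
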
static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
	int ret;

	switch (imxtm->type) {
	case GPT_TYPE_IMX1:
		imxtm->gpt = &imx1_gpt_data;
		break;
	case GPT_TYPE_IMX21:
		imxtm->gpt = &imx21_gpt_data;
		break;
	case GPT_TYPE_IMX31:
		imxtm->gpt = &imx31_gpt_data;
		break;
	case GPT_TYPE_IMX6DL:
		imxtm->gpt = &imx6dl_gpt_data;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(imxtm->clk_per)) {
		pr_err("i.MX timer: unable to get clk\n");
		return PTR_ERR(imxtm->clk_per);
	}

	if (!IS_ERR(imxtm->clk_ipg))
		clk_prepare_enable(imxtm->clk_ipg);

	clk_prepare_enable(imxtm->clk_per);

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */

	writel_relaxed(0, imxtm->base + MXC_TCTL);
	writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */

	imxtm->gpt->gpt_setup_tctl(imxtm);

	/* init and register the timer to the framework */
	ret = mxc_clocksource_init(imxtm);
	if (ret)
		return ret;

	return mxc_clockevent_init(imxtm);
}

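/*
 * Illustrative device tree node consumed by the init path below. The
 * register address, interrupt number and clock specifiers are examples
 * only; the driver relies just on "reg", "interrupts" and the
 * "ipg"/"per" (or "osc_per") clock names:
 *
 *	gpt1: timer@10003000 {
 *		compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
 *		reg = <0x10003000 0x1000>;
 *		interrupts = <26>;
 *		clocks = <&clks 46>, <&clks 61>;
 *		clock-names = "ipg", "per";
 *	};
 */
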
static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;
	static int initialized;
	int ret;

	/* Support one instance only */
	if (initialized)
		return 0;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	if (!imxtm)
		return -ENOMEM;

	imxtm->base = of_iomap(np, 0);
	if (!imxtm->base)
		return -ENXIO;

	imxtm->irq = irq_of_parse_and_map(np, 0);
	if (imxtm->irq <= 0)
		return -EINVAL;

	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

	/* Try osc_per first, and fall back to per otherwise */
	imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
	if (IS_ERR(imxtm->clk_per))
		imxtm->clk_per = of_clk_get_by_name(np, "per");

	imxtm->type = type;

	ret = _mxc_timer_init(imxtm);
	if (ret)
		return ret;

	initialized = 1;

	return 0;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
	enum imx_gpt_type type = GPT_TYPE_IMX31;

	/*
	 * We were using the same compatible string for the i.MX6Q/D and
	 * i.MX6DL/S GPT devices, although they actually have different
	 * programming models. This is a workaround to keep existing
	 * i.MX6DL/S DTBs working with newer kernels.
	 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		type = GPT_TYPE_IMX6DL;

	return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);