/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;
	void __iomem *base;
	int irq;
	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;
	void __iomem *mapbase;
	struct clk *clk;
	struct sh_tmu_channel channel;
};

static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

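/*
 * Register layout, as implied by the accessors below: TSTR is a single
 * 8-bit start/stop register shared by all channels, sitting at the base
 * of the TMU block (mapbase). The per-channel registers are spaced four
 * bytes apart from the channel base; TCR is accessed as a 16-bit
 * register, TCOR and TCNT as 32-bit registers.
 */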
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs = reg_nr << 2;

	if (reg_nr == TSTR)
		return ioread8(ch->tmu->mapbase);
	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs = reg_nr << 2;

	if (reg_nr == TSTR) {
		iowrite8(value, ch->tmu->mapbase);
		return;
	}
	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

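/*
 * Power the channel up and program a known idle state: counter stopped,
 * maximum reload value, interrupts off, and TCR cleared so the channel
 * counts at the parent clock divided by four.
 */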
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, 0x0000);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);
	return 0;
}

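/*
 * sh_tmu_enable()/sh_tmu_disable() reference-count the hardware enable;
 * only the first user powers the channel up (also taking a runtime PM
 * reference), and the clocksource suspend/resume hooks further down
 * adjust enable_count directly to stay balanced with it.
 */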
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, 0x0000);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

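/*
 * Reprogram the channel for the next event: the counter is stopped while
 * TCOR/TCNT are rewritten, the TCR read acknowledges any pending
 * underflow, and writing 0x0020 sets the underflow interrupt enable
 * (UNIE) bit.
 */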
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, 0x0020);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

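/*
 * A oneshot event is a one-off, so the handler masks further underflow
 * interrupts by writing 0x0000 to TCR; in periodic mode the write of
 * 0x0020 keeps UNIE set and merely clears the pending underflow flag.
 */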
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(ch, TCR, 0x0000);
	else
		sh_tmu_write(ch, TCR, 0x0020);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);

	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

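/*
 * TCNT counts down from the reload value, so the raw reading is inverted
 * (x ^ 0xffffffff) to give the monotonically increasing value the
 * clocksource core expects.
 */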
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}
	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

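/*
 * The suspend/resume hooks bypass sh_tmu_enable()/sh_tmu_disable() and
 * adjust enable_count directly: at syscore time the normal runtime PM
 * paths are unavailable, so the genpd syscore helpers are used instead,
 * and another user may still hold a reference on the channel.
 */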
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);
	clockevents_config(ced, ch->rate);

	if (periodic) {
		/* ticks per jiffy, rounded to nearest */
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

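/*
 * The clockevents core invokes set_mode on every transition, so an
 * already-running periodic or oneshot channel is torn down before the
 * requested mode is started.
 */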
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name, unsigned long rating)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	/* dummy 1 Hz rate, reconfigured in sh_tmu_clock_event_start() */
	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

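/*
 * Each channel serves as either a clockevent or a clocksource, never
 * both: the role is chosen from the first non-zero rating in the
 * platform data, with clockevent taking precedence.
 */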
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   unsigned long clockevent_rating,
			   unsigned long clocksource_rating)
{
	if (clockevent_rating)
		sh_tmu_register_clockevent(ch, name, clockevent_rating);
	else if (clocksource_rating)
		sh_tmu_register_clocksource(ch, name, clocksource_rating);

	return 0;
}

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch,
				struct sh_tmu_device *tmu)
{
	struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

	ch->tmu = tmu;

	/*
	 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps channel
	 * register blocks at base + 2 + 12 * index, while all other variants
	 * map them at base + 4 + 12 * index. We can compute the index by just
	 * dividing by 12, the 2 bytes or 4 bytes offset being hidden by the
	 * integer division.
	 */
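	/*
	 * For example, with the base + 4 + 12 * index layout the first three
	 * channels sit at offsets 4, 16 and 28, yielding indices 4 / 12 = 0,
	 * 16 / 12 = 1 and 28 / 12 = 2; the SH3 offsets 2, 14 and 26 divide
	 * down to the same indices.
	 */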
	ch->index = cfg->channel_offset / 12;

	ch->irq = platform_get_irq(tmu->pdev, 0);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       cfg->clockevent_rating,
			       cfg->clocksource_rating);
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int ret;

	tmu->pdev = pdev;

	if (!cfg) {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	platform_set_drvdata(pdev, tmu);

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	/*
	 * Map memory, let channel.base point to our channel and mapbase to the
	 * start/stop shared register.
	 */
	tmu->channel.base = ioremap_nocache(res->start, resource_size(res));
	if (tmu->channel.base == NULL) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = tmu->channel.base - cfg->channel_offset;

	/* get hold of clock */
	tmu->clk = clk_get(&tmu->pdev->dev, "tmu_fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(tmu->clk);
		goto err_iounmap;
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	ret = sh_tmu_channel_setup(&tmu->channel, tmu);
	if (ret < 0)
		goto err_clk_unprepare;

	return 0;

err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
err_iounmap:
	iounmap(tmu->channel.base);
	return ret;
}

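/*
 * Probe may run twice: once as an early platform device, so the timer is
 * usable as "earlytimer" during early boot, and again as a regular
 * platform driver. In the second pass drvdata is already set and the
 * device is simply kept as configured by the early pass.
 */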
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	}
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");