/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "arm_arch_timer: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>
#undef pr_fmt
#define pr_fmt(fmt) "arch_timer: " fmt
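/*
 * In the memory-mapped timer's CNTCTLBase frame, CNTTIDR carries a 4-bit
 * field per frame; CNTTIDR_VIRT(n) tests bit 1 of frame n's field, which
 * advertises virtual timer capability. The per-frame CNTACR(n) registers
 * gate access to the counter, frequency and timer registers below.
 */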
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
static u32 arch_timer_rate;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
static bool mct_enable = IS_ENABLED(CONFIG_CLKSRC_EXYNOS_MCT);

static int __init early_evtstrm_cfg(char *buf)
	return strtobool(buf, &evtstrm_enable);
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
/*
 * Architected system timer support.
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
		arch_timer_reg_write_cp15(access, reg, val);
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
		val = arch_timer_reg_read_cp15(access, reg);
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
static u64 arch_counter_read(struct clocksource *cs)
	return arch_timer_read_counter();

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
	return arch_timer_read_counter();

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	int _retries = 200;				\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
	} while (unlikely(_old != _new) && _retries);	\
	WARN_ON_ONCE(!_retries);			\

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
	return __fsl_a008585_read_reg(cntp_tval_el0);

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
	return __fsl_a008585_read_reg(cntv_tval_el0);

static u64 notrace fsl_a008585_read_cntvct_el0(void)
	return __fsl_a008585_read_reg(cntvct_el0);
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verifying that the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than two read errors, triggering the warning, so setting
 * the number of retries far beyond the number of iterations the loop has
 * been observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
	WARN_ON_ONCE(!_retries);					\

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
	return __hisi_161010101_read_reg(cntp_tval_el0);

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
	return __hisi_161010101_read_reg(cntv_tval_el0);

static u64 notrace hisi_161010101_read_cntvct_el0(void)
	return __hisi_161010101_read_reg(cntvct_el0);
static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
		.oem_table_id	= "HIP05   ",

		.oem_table_id	= "HIP06   ",

		.oem_table_id	= "HIP07   ",

	{ /* Sentinel indicating the end of the OEM array */ },
#ifdef CONFIG_ARM64_ERRATUM_858921
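/*
 * ARM erratum 858921 (Cortex-A73): a read of CNTVCT may return a wrong value
 * if it races with the low 32 bits rolling over. Read the counter twice back
 * to back; if bit 32 differs between the reads, a roll-over happened and the
 * first value is returned, otherwise the second.
 */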
static u64 notrace arm64_858921_read_cntvct_el0(void)
	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
	       timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
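/*
 * The erratum handlers below program the timer via an absolute compare value:
 * the requested delta is added to the current counter and written directly to
 * CNTP_CVAL_EL0/CNTV_CVAL_EL0, rather than going through the relative TVAL
 * register that these errata can affect.
 */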
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
							    struct clock_event_device *clk)
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);

static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
							    struct clock_event_device *clk)
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
#ifdef CONFIG_HISILICON_ERRATUM_161010101
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,

		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
#ifdef CONFIG_ARM64_ERRATUM_858921
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,

bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);

bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
	return this_cpu_has_cap((uintptr_t)wa->id);

bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,

		__this_cpu_write(timer_unstable_counter_workaround, wa);

		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;

	/*
	 * Use the locked version, as we're called from the CPU
	 * hotplug framework. Otherwise, we end up in deadlock-land.
	 */
	static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
	const struct arch_timer_erratum_workaround *wa;
	ate_match_fn_t match_fn = NULL;

		match_fn = arch_timer_check_dt_erratum;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;

	wa = arch_timer_iterate_errata(type, match_fn, arg);

	if (needs_unstable_timer_counter_workaround()) {
		const struct arch_timer_erratum_workaround *__wa;
		__wa = __this_cpu_read(timer_unstable_counter_workaround);
		if (__wa && wa != __wa)
			pr_warn("Can't enable workaround for %s (clashes with %s)\n",
				wa->desc, __wa->desc);

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
#define erratum_handler(fn, r, ...)					\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\

static bool arch_timer_this_cpu_has_cntvct_wa(void)
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;

#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

static int arch_timer_set_next_event_virt(unsigned long evt,
					   struct clock_event_device *clk)
	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);

static int arch_timer_set_next_event_phys(unsigned long evt,
					   struct clock_event_device *clk)
	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					       struct clock_event_device *clk)
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					       struct clock_event_device *clk)
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;

	clk->set_state_shutdown(clk);
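	/*
	 * TVAL is programmed as a 32-bit down-counter, so the clockevent is
	 * registered with a minimum delta of 0xf ticks and a maximum of
	 * 0x7fffffff, keeping the value within positive signed 32-bit range.
	 */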
	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
static void arch_timer_evtstrm_enable(int divider)
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
static void arch_timer_configure_evtstream(void)
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
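	/*
	 * Illustrative figures (not taken from this driver): assuming a 24 MHz
	 * counter and ARCH_TIMER_EVT_STREAM_FREQ of 10 kHz, evt_stream_div is
	 * 2400 and fls() returns 12; bit 10 of 2400 is clear, so pos is
	 * decremented to 11, the power of two nearest the divisor.
	 */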
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
static void arch_counter_set_user_access(void)
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may have been already
	 * disabled though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
static bool arch_timer_has_nonsecure_ppi(void)
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

static u32 check_ppi_trigger(int irq)
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
static int arch_timer_starting_cpu(unsigned int cpu)
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();
/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
	/* Who has more than one independent system counter? */

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("frequency not available\n");
static void arch_timer_banner(unsigned type)
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
u32 arch_timer_get_rate(void)
	return arch_timer_rate;

static u64 arch_counter_get_cntvct_mem(void)
	u32 vct_lo, vct_hi, tmp_hi;
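	/*
	 * The 64-bit counter is read as two halves: read HI, then LO, then HI
	 * again, and retry until both HI reads match so the combined value is
	 * consistent across a carry from the low word.
	 */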
	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
	return &arch_timer_kvm_info;
static void __init arch_counter_register(unsigned type)

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		if (IS_ENABLED(CONFIG_ARM64) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);
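	/*
	 * The timecounter initialized here is exported through
	 * arch_timer_kvm_info, which KVM retrieves via arch_timer_get_kvm_info().
	 */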
	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
static void arch_timer_stop(struct clock_event_device *clk)
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);

static int arch_timer_dying_cpu(unsigned int cpu)
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
	if (action == CPU_PM_ENTER)
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,

static int __init arch_timer_cpu_pm_init(void)
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);

static void __init arch_timer_cpu_pm_deinit(void)
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));

#else
static int __init arch_timer_cpu_pm_init(void)

static void __init arch_timer_cpu_pm_deinit(void)
static int __init arch_timer_register(void)

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/*
	 * FIXME: The arm64 architecture enables the arm_arch_timer always
	 * even if arm_arch_timer is not stable. When Exynos5433 uses the
	 * arm_arch_timer, it fails to enable/disable the secondary cpu.
	 * To fix the hotplug issue of secondary cpu, if Exynos's MCT timer
	 * is enabled, arm_arch_timer doesn't register the clockevent
	 */

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],

	free_percpu(arch_timer_evt);
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);

	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
static bool __init arch_timer_needs_of_probing(void)
	struct device_node *dn;
	bool needs_probing = false;
	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

	/* We have two timers, and both device-tree nodes are probed. */
	if ((arch_timers_present & mask) == mask)

	/*
	 * Only one type of timer is probed,
	 * check if we have another type of timer node in device-tree.
	 */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
	else
		dn = of_find_matching_node(NULL, arch_timer_of_match);

	if (dn && of_device_is_available(dn))
		needs_probing = true;

	return needs_probing;
static int __init arch_timer_common_init(void)
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt is provided for the virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
static int __init arch_timer_of_init(struct device_node *np)

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							  "arm,no-tick-in-suspend");

	ret = arch_timer_register();

	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();

TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);

	rate = readl_relaxed(base + CNTFRQ);
static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			arch_timer_mem_use_virtual = true;

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))

	iounmap(cntctlbase);
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)

	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");

	if (!request_mem_region(frame->cntbase, frame->size,

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");

	ret = arch_timer_mem_register(base, irq);

	arch_counter_base = base;
	arch_timers_present |= ARCH_TIMER_TYPE_MEM;
static int __init arch_timer_mem_of_init(struct device_node *np)
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	for_each_available_child_of_node(np, frame_node) {
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);

		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);

		frame = &timer_mem->frame[n];

		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);

		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
		       &timer_mem->cntctlbase);

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();

TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		 arch_timer_mem_of_init);
#ifdef CONFIG_ACPI_GTDT
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
	struct arch_timer_mem_frame *frame;

	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		frame = &timer_mem->frame[i];

		rate = arch_timer_mem_frame_get_cntfrq(frame);
		if (rate == arch_timer_rate)
			continue;

		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
			(unsigned long)rate, (unsigned long)arch_timer_rate);
static int __init arch_timer_mem_acpi_init(int platform_timer_count)
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			 GFP_KERNEL);

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of features we want.
	 */
	for (i = 0; i < timer_count; i++) {

		frame = arch_timer_mem_find_best_frame(timer);

		ret = arch_timer_mem_verify_cntfrq(timer);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");

		if (!best_frame) /* implies !frame */
			/*
			 * Only complain about missing suitable frames if we
			 * haven't already found one in a previous iteration.
			 */
			pr_err("Unable to find a suitable frame in timer @ %pa\n",
			       &timer->cntctlbase);

	ret = arch_timer_mem_frame_register(best_frame);
/* Initialize per-processor generic timer and memory-mapped timer (if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
	int ret, platform_timer_count;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret) {
		pr_err("Failed to init GTDT table.\n");

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	if (!arch_timer_rate) {
		pr_err(FW_BUG "frequency not available.\n");

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();

	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();

TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);