// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Actions Semi Owl timer
 *
 * Copyright 2012 Actions Semi Inc.
 * Author: Actions Semi, Inc.
 *
 * Copyright (c) 2017 SUSE Linux GmbH
 * Author: Andreas Färber
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define OWL_Tx_CTL              0x0
#define OWL_Tx_CMP              0x4
#define OWL_Tx_VAL              0x8

#define OWL_Tx_CTL_PD           BIT(0)
#define OWL_Tx_CTL_INTEN        BIT(1)
#define OWL_Tx_CTL_EN           BIT(2)

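/*
 * Two timers from the same register block are used: one as the
 * clocksource (and sched_clock source), the other ("timer1" by its
 * interrupt name) as the clockevent device.
 */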
static void __iomem *owl_timer_base;
static void __iomem *owl_clksrc_base;
static void __iomem *owl_clkevt_base;

static inline void owl_timer_reset(void __iomem *base)
{
        writel(0, base + OWL_Tx_CTL);
        writel(0, base + OWL_Tx_VAL);
        writel(0, base + OWL_Tx_CMP);
}

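/*
 * Toggle the EN bit via read-modify-write. PD (the interrupt pending
 * flag) is cleared by writing 1, so mask it out here to avoid
 * acknowledging a pending interrupt as a side effect.
 */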
static inline void owl_timer_set_enabled(void __iomem *base, bool enabled)
{
        u32 ctl = readl(base + OWL_Tx_CTL);

        /* PD bit is cleared when set */
        ctl &= ~OWL_Tx_CTL_PD;

        if (enabled)
                ctl |= OWL_Tx_CTL_EN;
        else
                ctl &= ~OWL_Tx_CTL_EN;

        writel(ctl, base + OWL_Tx_CTL);
}

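/* Raw 32-bit counter value of the clocksource timer, for sched_clock. */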
static u64 notrace owl_timer_sched_read(void)
{
        return (u64)readl(owl_clksrc_base + OWL_Tx_VAL);
}

static int owl_timer_set_state_shutdown(struct clock_event_device *evt)
{
        owl_timer_set_enabled(owl_clkevt_base, false);

        return 0;
}

static int owl_timer_set_state_oneshot(struct clock_event_device *evt)
{
        owl_timer_reset(owl_clkevt_base);

        return 0;
}

static int owl_timer_tick_resume(struct clock_event_device *evt)
{
        return 0;
}

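/*
 * Program a one-shot event: stop the timer, enable its interrupt,
 * restart counting from zero and fire when the compare value is hit.
 */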
static int owl_timer_set_next_event(unsigned long evt,
                                    struct clock_event_device *ev)
{
        void __iomem *base = owl_clkevt_base;

        owl_timer_set_enabled(base, false);
        writel(OWL_Tx_CTL_INTEN, base + OWL_Tx_CTL);
        writel(0, base + OWL_Tx_VAL);
        writel(evt, base + OWL_Tx_CMP);
        owl_timer_set_enabled(base, true);

        return 0;
}

static struct clock_event_device owl_clockevent = {
        .name                   = "owl_tick",
        .rating                 = 200,
        .features               = CLOCK_EVT_FEAT_ONESHOT |
                                  CLOCK_EVT_FEAT_DYNIRQ,
        .set_state_shutdown     = owl_timer_set_state_shutdown,
        .set_state_oneshot      = owl_timer_set_state_oneshot,
        .tick_resume            = owl_timer_tick_resume,
        .set_next_event         = owl_timer_set_next_event,
};

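/* Acknowledge the pending interrupt (write 1 to PD), then run the event handler. */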
static irqreturn_t owl_timer1_interrupt(int irq, void *dev_id)
{
        struct clock_event_device *evt = (struct clock_event_device *)dev_id;

        writel(OWL_Tx_CTL_PD, owl_clkevt_base + OWL_Tx_CTL);

        evt->event_handler(evt);

        return IRQ_HANDLED;
}

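/*
 * Map the timer block, derive the per-timer bases, then register one
 * timer as clocksource/sched_clock and the other as a one-shot
 * clockevent driven by the "timer1" interrupt.
 */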
static int __init owl_timer_init(struct device_node *node)
{
        struct clk *clk;
        unsigned long rate;
        int timer1_irq, ret;

        owl_timer_base = of_io_request_and_map(node, 0, "owl-timer");
        if (IS_ERR(owl_timer_base)) {
                pr_err("Can't map timer registers\n");
                return PTR_ERR(owl_timer_base);
        }

        owl_clksrc_base = owl_timer_base + 0x08;
        owl_clkevt_base = owl_timer_base + 0x14;

        timer1_irq = of_irq_get_byname(node, "timer1");
        if (timer1_irq <= 0) {
                pr_err("Can't parse timer1 IRQ\n");
                return -EINVAL;
        }

        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                pr_err("Failed to get clock for clocksource (%d)\n", ret);
                return ret;
        }

        rate = clk_get_rate(clk);

        owl_timer_reset(owl_clksrc_base);
        owl_timer_set_enabled(owl_clksrc_base, true);

        sched_clock_register(owl_timer_sched_read, 32, rate);
        ret = clocksource_mmio_init(owl_clksrc_base + OWL_Tx_VAL, node->name,
                                    rate, 200, 32, clocksource_mmio_readl_up);
        if (ret) {
                pr_err("Failed to register clocksource (%d)\n", ret);
                return ret;
        }

        owl_timer_reset(owl_clkevt_base);

        ret = request_irq(timer1_irq, owl_timer1_interrupt, IRQF_TIMER,
                          "owl-timer", &owl_clockevent);
        if (ret) {
                pr_err("failed to request irq %d\n", timer1_irq);
                return ret;
        }

        owl_clockevent.cpumask = cpumask_of(0);
        owl_clockevent.irq = timer1_irq;

        clockevents_config_and_register(&owl_clockevent, rate,
                                        0xf, 0xffffffff);

        return 0;
}
TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);