// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2014-2018 MediaTek Inc.

/*
 * Library for MediaTek External Interrupt Support
 *
 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "mtk-eint.h"
#define MTK_EINT_EDGE_SENSITIVE		0
#define MTK_EINT_LEVEL_SENSITIVE	1
#define MTK_EINT_DBNC_SET_DBNC_BITS	4
#define MTK_EINT_DBNC_RST_BIT		(0x1 << 1)
#define MTK_EINT_DBNC_SET_EN		(0x1 << 0)
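
/*
 * Default register table used when a SoC driver does not supply its own
 * mtk_eint_regs. Each functional register (status, ack, mask, ...) is a
 * bank of 32-bit words, one word per port of 32 EINT pins; the offsets
 * below follow the generic MediaTek EINT layout.
 */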
static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat      = 0x000,
	.ack       = 0x040,
	.mask      = 0x080,
	.mask_set  = 0x0c0,
	.mask_clr  = 0x100,
	.sens      = 0x140,
	.sens_set  = 0x180,
	.sens_clr  = 0x1c0,
	.soft      = 0x200,
	.soft_set  = 0x240,
	.soft_clr  = 0x280,
	.pol       = 0x300,
	.pol_set   = 0x340,
	.pol_clr   = 0x380,
	.dom_en    = 0x400,
	.dbnc_ctrl = 0x500,
	.dbnc_set  = 0x600,
	.dbnc_clr  = 0x700,
};
static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
					 unsigned int eint_num,
					 unsigned int offset)
{
	unsigned int eint_base = 0;
	void __iomem *reg;

	if (eint_num >= eint->hw->ap_num)
		eint_base = eint->hw->ap_num;

	reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;

	return reg;
}
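
/*
 * Hardware debounce is only implemented for the first db_cnt pins, and
 * the driver only allows it for level-sensitive pins.
 */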
static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
					     unsigned int eint_num)
{
	unsigned int sens;
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->sens);

	if (readl(reg) & bit)
		sens = MTK_EINT_LEVEL_SENSITIVE;
	else
		sens = MTK_EINT_EDGE_SENSITIVE;

	if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE)
		return 1;
	else
		return 0;
}
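
/*
 * The EINT controller cannot trigger on both edges at once, so dual-edge
 * interrupts are emulated: after each event the polarity is flipped to
 * match the current GPIO level, re-arming the line for the opposite edge.
 * The loop guards against the level changing while the polarity is being
 * reprogrammed.
 */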
static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
{
	int start_level, curr_level;
	unsigned int reg_offset;
	u32 mask = BIT(hwirq & 0x1f);
	u32 port = (hwirq >> 5) & eint->hw->port_mask;
	void __iomem *reg = eint->base + (port << 2);

	curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);

	do {
		start_level = curr_level;
		if (start_level)
			reg_offset = eint->regs->pol_clr;
		else
			reg_offset = eint->regs->pol_set;
		writel(mask, reg + reg_offset);

		curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
							      hwirq);
	} while (start_level != curr_level);

	return start_level;
}
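
/*
 * Masking and unmasking go through dedicated write-one-to-set and
 * write-one-to-clear registers, so no read-modify-write is needed.
 */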
static void mtk_eint_mask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_set);

	writel(mask, reg);
}
static void mtk_eint_unmask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_clr);

	writel(mask, reg);

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);
}
static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
				      unsigned int eint_num)
{
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->mask);

	return !!(readl(reg) & bit);
}
static void mtk_eint_ack(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->ack);

	writel(mask, reg);
}
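
/*
 * The trigger type is programmed through two independent controls:
 * polarity (pol_set/pol_clr selects high/rising vs. low/falling) and
 * sensitivity (sens_set/sens_clr selects level vs. edge). Both-edge
 * triggering has no hardware support and is emulated via
 * mtk_eint_flip_edge().
 */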
static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg;

	if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
	    ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
		dev_err(eint->dev,
			"Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
			d->irq, d->hwirq, type);
		return -EINVAL;
	}

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		eint->dual_edge[d->hwirq] = 1;
	else
		eint->dual_edge[d->hwirq] = 0;

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
		writel(mask, reg);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
		writel(mask, reg);
	}

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);

	return 0;
}
static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	int shift = d->hwirq & 0x1f;
	int reg = d->hwirq >> 5;

	if (on)
		eint->wake_mask[reg] |= BIT(shift);
	else
		eint->wake_mask[reg] &= ~BIT(shift);

	return 0;
}
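
/*
 * Apply a software mask snapshot to the hardware: bits set in buf are
 * unmasked (mask_clr), bits clear are masked (mask_set). Used together
 * with mtk_eint_chip_read_mask() to swap between the runtime mask and
 * the wake mask across suspend/resume.
 */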
static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
				     void __iomem *base, u32 *buf)
{
	int port;
	void __iomem *reg;

	for (port = 0; port < eint->hw->ports; port++) {
		reg = base + (port << 2);
		writel_relaxed(~buf[port], reg + eint->regs->mask_set);
		writel_relaxed(buf[port], reg + eint->regs->mask_clr);
	}
}
static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
				    void __iomem *base, u32 *buf)
{
	int port;
	void __iomem *reg;

	for (port = 0; port < eint->hw->ports; port++) {
		reg = base + eint->regs->mask + (port << 2);
		buf[port] = ~readl_relaxed(reg);
		/* Mask is 0 when irq is enabled, and 1 when disabled. */
	}
}
static int mtk_eint_irq_request_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;
	int err;

	err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
					   &gpio_n, &gpio_c);
	if (err < 0) {
		dev_err(eint->dev, "Can not find pin\n");
		return err;
	}

	err = gpiochip_lock_as_irq(gpio_c, gpio_n);
	if (err < 0) {
		dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return err;
	}

	err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
	if (err < 0) {
		dev_err(eint->dev, "Can not set eint mode\n");
		return err;
	}

	return 0;
}
static void mtk_eint_irq_release_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;

	eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
				     &gpio_c);

	gpiochip_unlock_as_irq(gpio_c, gpio_n);
}
static struct irq_chip mtk_eint_irq_chip = {
	.name = "mt-eint",
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};
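
/*
 * Grant the AP domain access to all EINT pins by writing all-ones to the
 * domain-enable register of every 32-pin port.
 */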
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
	void __iomem *reg = eint->base + eint->regs->dom_en;
	unsigned int i;

	for (i = 0; i < eint->hw->ap_num; i += 32) {
		writel(0xffffffff, reg);
		reg += 4;
	}

	return 0;
}
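
/*
 * If debounce is enabled for the pin, reset its hardware debounce
 * counter after each interrupt so the next event is debounced from a
 * clean state.
 */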
static inline void
mtk_eint_debounce_process(struct mtk_eint *eint, int index)
{
	unsigned int rst, ctrl_offset;
	unsigned int bit, dbnc;

	ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
	dbnc = readl(eint->base + ctrl_offset);
	bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
	if ((bit & dbnc) > 0) {
		ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
		rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
		writel(rst, eint->base + ctrl_offset);
	}
}
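
/*
 * Chained handler for the upstream interrupt line: scan the status word
 * of each 32-pin port, dispatch every pending pin to its mapped virq,
 * and re-arm dual-edge pins. If the GPIO level changed while the
 * polarity was being flipped, a software interrupt is raised so the
 * missed edge is not lost.
 */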
static void mtk_eint_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mtk_eint *eint = irq_desc_get_handler_data(desc);
	unsigned int status, eint_num;
	int offset, index, virq;
	void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
	int dual_edge, start_level, curr_level;

	chained_irq_enter(chip, desc);
	for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
	     reg += 4) {
		status = readl(reg);
		while (status) {
			offset = __ffs(status);
			index = eint_num + offset;
			virq = irq_find_mapping(eint->domain, index);
			status &= ~BIT(offset);

			dual_edge = eint->dual_edge[index];
			if (dual_edge) {
				/*
				 * Clear soft-irq in case we raised it last
				 * time.
				 */
				writel(BIT(offset), reg - eint->regs->stat +
				       eint->regs->soft_clr);

				start_level =
				eint->gpio_xlate->get_gpio_state(eint->pctl,
								 index);
			}

			generic_handle_irq(virq);

			if (dual_edge) {
				curr_level = mtk_eint_flip_edge(eint, index);

				/*
				 * If the level changed, we might have lost
				 * one edge interrupt; raise it through
				 * soft-irq.
				 */
				if (start_level != curr_level)
					writel(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->soft_set);
			}

			if (index < eint->hw->db_cnt)
				mtk_eint_debounce_process(eint, index);
		}
	}
	chained_irq_exit(chip, desc);
}
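
/*
 * On suspend, snapshot the current hardware mask and replace it with the
 * wake mask so that only wake-enabled pins can interrupt; resume
 * restores the snapshot.
 */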
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
	mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
	mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);

	return 0;
}
int mtk_eint_do_resume(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);

	return 0;
}
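
/*
 * Program the hardware debounce time for a pin: the requested value (in
 * microseconds) is rounded up to the nearest entry of debounce_time.
 * The pin is temporarily masked while the debounce counter is
 * reprogrammed and reset.
 */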
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
			  unsigned int debounce)
{
	int virq, eint_offset;
	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
		     dbnc;
	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
						     64000, 128000, 256000};
	struct irq_data *d;

	virq = irq_find_mapping(eint->domain, eint_num);
	eint_offset = (eint_num % 4) * 8;
	d = irq_get_irq_data(virq);

	set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
	clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;

	if (!mtk_eint_can_en_debounce(eint, eint_num))
		return -EINVAL;

	dbnc = ARRAY_SIZE(debounce_time);
	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
		if (debounce <= debounce_time[i]) {
			dbnc = i;
			break;
		}
	}

	if (!mtk_eint_get_mask(eint, eint_num)) {
		mtk_eint_mask(d);
		unmask = 1;
	} else {
		unmask = 0;
	}

	clr_bit = 0xff << eint_offset;
	writel(clr_bit, eint->base + clr_offset);

	bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
		eint_offset;
	rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
	writel(rst | bit, eint->base + set_offset);

	/*
	 * Delay a while (more than 2T) to wait for the hw debounce counter
	 * reset to work correctly.
	 */
	udelay(1);
	if (unmask == 1)
		mtk_eint_unmask(d);

	return 0;
}
int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
	int irq;

	irq = irq_find_mapping(eint->domain, eint_n);
	if (!irq)
		return -EINVAL;

	return irq;
}
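
/*
 * A minimal usage sketch, assuming a pinctrl driver that has already
 * mapped the EINT register block and built its hardware description
 * (soc_eint_hw, soc_eint_xt and pctl are hypothetical names; only
 * fields this file actually dereferences are filled in):
 *
 *	eint->dev = &pdev->dev;
 *	eint->base = devm_ioremap_resource(&pdev->dev, res);
 *	eint->irq = irq_of_parse_and_map(np, 0);
 *	eint->hw = &soc_eint_hw;	// ap_num/db_cnt/ports/port_mask
 *	eint->pctl = pctl;
 *	eint->gpio_xlate = &soc_eint_xt;	// pin <-> GPIO callbacks
 *	err = mtk_eint_do_init(eint);
 */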
int mtk_eint_do_init(struct mtk_eint *eint)
{
	int i;

	/* If clients don't assign a specific regs table, use the generic one */
	if (!eint->regs)
		eint->regs = &mtk_generic_eint_regs;

	eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				       sizeof(*eint->wake_mask), GFP_KERNEL);
	if (!eint->wake_mask)
		return -ENOMEM;

	eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				      sizeof(*eint->cur_mask), GFP_KERNEL);
	if (!eint->cur_mask)
		return -ENOMEM;

	eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
				       sizeof(int), GFP_KERNEL);
	if (!eint->dual_edge)
		return -ENOMEM;

	eint->domain = irq_domain_add_linear(eint->dev->of_node,
					     eint->hw->ap_num,
					     &irq_domain_simple_ops, NULL);
	if (!eint->domain)
		return -ENOMEM;

	mtk_eint_hw_init(eint);
	for (i = 0; i < eint->hw->ap_num; i++) {
		int virq = irq_create_mapping(eint->domain, i);

		irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(virq, eint);
	}

	irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
					 eint);

	return 0;
}