blackfin: mach-common: add sec support for bf60x
arch/blackfin/mach-common/ints-priority.c
1 /*
2  * Set up the interrupt priorities
3  *
4  * Copyright  2004-2009 Analog Devices Inc.
5  *                 2003 Bas Vermeulen <bas@buyways.nl>
6  *                 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
7  *            2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
8  *                 1999 D. Jeff Dionne <jeff@uclinux.org>
9  *                 1996 Roman Zippel
10  *
11  * Licensed under the GPL-2
12  */
13
14 #include <linux/module.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/seq_file.h>
17 #include <linux/irq.h>
18 #include <linux/sched.h>
19 #include <linux/syscore_ops.h>
20 #include <asm/delay.h>
21 #ifdef CONFIG_IPIPE
22 #include <linux/ipipe.h>
23 #endif
24 #include <asm/traps.h>
25 #include <asm/blackfin.h>
26 #include <asm/gpio.h>
27 #include <asm/irq_handler.h>
28 #include <asm/dpmc.h>
29
30 #ifndef CONFIG_BF60x
31 # define SIC_SYSIRQ(irq)        (irq - (IRQ_CORETMR + 1))
32 #else
33 # define SIC_SYSIRQ(irq)        ((irq) - IVG15)
34 #endif
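
/*
 * Illustrative note (not part of the original file): SIC_SYSIRQ() converts a
 * Linux IRQ number back into the raw system-interrupt index used by the
 * hardware.  Assuming BFIN_IRQ(x) expands to (x) + IVG15 on BF60x and to
 * (x) + IVG7 (i.e. IRQ_CORETMR + 1) on the older parts, SIC_SYSIRQ() is its
 * inverse; e.g. SIC_SYSIRQ(BFIN_IRQ(3)) evaluates to 3 on either family.
 */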
35
36 /*
37  * NOTES:
38  * - we have separated the physical hardware interrupt from the
39  *   levels that the Linux kernel sees (see the description in irq.h)
40  *
41  */
42
43 #ifndef CONFIG_SMP
44 /* Initialize this to an actual value to force it into the .data
45  * section so that we know it is properly initialized at entry into
46  * the kernel but before bss is initialized to zero (which is where
47  * it would live otherwise).  The 0x1f magic represents the IRQs we
48  * cannot actually mask out in hardware.
49  */
50 unsigned long bfin_irq_flags = 0x1f;
51 EXPORT_SYMBOL(bfin_irq_flags);
52 #endif
53
54 #ifdef CONFIG_PM
55 unsigned long bfin_sic_iwr[3];  /* Up to 3 SIC_IWRx registers */
56 unsigned vr_wakeup;
57 #endif
58
59 #ifndef CONFIG_BF60x
60 static struct ivgx {
61         /* irq number for request_irq, available in mach-bf5xx/irq.h */
62         unsigned int irqno;
63         /* corresponding bit in the SIC_ISR register */
64         unsigned int isrflag;
65 } ivg_table[NR_PERI_INTS];
66
67 static struct ivg_slice {
68         /* position of first irq in ivg_table for given ivg */
69         struct ivgx *ifirst;
70         struct ivgx *istop;
71 } ivg7_13[IVG13 - IVG7 + 1];
72
73
74 /*
75  * Search SIC_IAR and fill tables with the irqvalues
76  * and their positions in the SIC_ISR register.
77  */
78 static void __init search_IAR(void)
79 {
80         unsigned ivg, irq_pos = 0;
81         for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
82                 int irqN;
83
84                 ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
85
86                 for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
87                         int irqn;
88                         u32 iar =
89                                 bfin_read32((unsigned long *)SIC_IAR0 +
90 #if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
91         defined(CONFIG_BF538) || defined(CONFIG_BF539)
92                                 ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
93 #else
94                                 (irqN >> 3)
95 #endif
96                                 );
97                         for (irqn = irqN; irqn < irqN + 4; ++irqn) {
98                                 int iar_shift = (irqn & 7) * 4;
99                                 if (ivg == (0xf & (iar >> iar_shift))) {
100                                         ivg_table[irq_pos].irqno = IVG7 + irqn;
101                                         ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
102                                         ivg7_13[ivg].istop++;
103                                         irq_pos++;
104                                 }
105                         }
106                 }
107         }
108 }
109 #endif
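
/*
 * Illustrative note (not part of the original file): vec_to_irq() further
 * down walks ivg7_13[vec - IVG7].ifirst .. istop and returns the first entry
 * whose isrflag bit is pending in SIC_ISR, so the order in which search_IAR()
 * fills ivg_table decides which source wins when several peripherals share
 * one IVG level.
 */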
110
111 /*
112  * This is for core internal IRQs
113  */
114 void bfin_ack_noop(struct irq_data *d)
115 {
116         /* Dummy function.  */
117 }
118
119 static void bfin_core_mask_irq(struct irq_data *d)
120 {
121         bfin_irq_flags &= ~(1 << d->irq);
122         if (!hard_irqs_disabled())
123                 hard_local_irq_enable();
124 }
125
126 static void bfin_core_unmask_irq(struct irq_data *d)
127 {
128         bfin_irq_flags |= 1 << d->irq;
129         /*
130          * If interrupts are enabled, IMASK must contain the same value
131          * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
132          * are currently disabled we need not do anything; one of the
133          * callers will take care of setting IMASK to the proper value
134          * when reenabling interrupts.
135          * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
136          * what we need.
137          */
138         if (!hard_irqs_disabled())
139                 hard_local_irq_enable();
140         return;
141 }
142
143 void bfin_internal_mask_irq(unsigned int irq)
144 {
145         unsigned long flags = hard_local_irq_save();
146 #ifndef CONFIG_BF60x
147 #ifdef SIC_IMASK0
148         unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
149         unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
150         bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
151                         ~(1 << mask_bit));
152 # if defined(CONFIG_SMP) || defined(CONFIG_ICC)
153         bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
154                         ~(1 << mask_bit));
155 # endif
156 #else
157         bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
158                         ~(1 << SIC_SYSIRQ(irq)));
159 #endif /* end of SIC_IMASK0 */
160 #endif
161         hard_local_irq_restore(flags);
162 }
163
164 static void bfin_internal_mask_irq_chip(struct irq_data *d)
165 {
166         bfin_internal_mask_irq(d->irq);
167 }
168
169 #ifdef CONFIG_SMP
170 void bfin_internal_unmask_irq_affinity(unsigned int irq,
171                 const struct cpumask *affinity)
172 #else
173 void bfin_internal_unmask_irq(unsigned int irq)
174 #endif
175 {
176         unsigned long flags = hard_local_irq_save();
177
178 #ifndef CONFIG_BF60x
179 #ifdef SIC_IMASK0
180         unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
181         unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
182 # ifdef CONFIG_SMP
183         if (cpumask_test_cpu(0, affinity))
184 # endif
185                 bfin_write_SIC_IMASK(mask_bank,
186                                 bfin_read_SIC_IMASK(mask_bank) |
187                                 (1 << mask_bit));
188 # ifdef CONFIG_SMP
189         if (cpumask_test_cpu(1, affinity))
190                 bfin_write_SICB_IMASK(mask_bank,
191                                 bfin_read_SICB_IMASK(mask_bank) |
192                                 (1 << mask_bit));
193 # endif
194 #else
195         bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
196                         (1 << SIC_SYSIRQ(irq)));
197 #endif
198 #endif
199         hard_local_irq_restore(flags);
200 }
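
/*
 * Worked example (illustrative, not part of the original file): the SIC mask
 * registers are 32 bits wide, so a system IRQ index of 40 selects
 * mask_bank = 40 / 32 = 1 and mask_bit = 40 % 32 = 8; bit 8 of SIC_IMASK1 is
 * cleared to mask the source and set to unmask it.
 */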
201
202 #ifdef CONFIG_BF60x
203 static void bfin_sec_preflow_handler(struct irq_data *d)
204 {
205         unsigned long flags = hard_local_irq_save();
206         unsigned int sid = SIC_SYSIRQ(d->irq);
207
208         bfin_write_SEC_SCI(0, SEC_CSID, sid);
209
210         hard_local_irq_restore(flags);
211 }
212
213 static void bfin_sec_mask_ack_irq(struct irq_data *d)
214 {
215         unsigned long flags = hard_local_irq_save();
216         unsigned int sid = SIC_SYSIRQ(d->irq);
217
218         bfin_write_SEC_SCI(0, SEC_CSID, sid);
219
220         hard_local_irq_restore(flags);
221 }
222
223 static void bfin_sec_unmask_irq(struct irq_data *d)
224 {
225         unsigned long flags = hard_local_irq_save();
226         unsigned int sid = SIC_SYSIRQ(d->irq);
227
228         bfin_write32(SEC_END, sid);
229
230         hard_local_irq_restore(flags);
231 }
232
233 static void bfin_sec_enable_ssi(unsigned int sid)
234 {
235         unsigned long flags = hard_local_irq_save();
236         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
237
238         reg_sctl |= SEC_SCTL_SRC_EN;
239         bfin_write_SEC_SCTL(sid, reg_sctl);
240
241         hard_local_irq_restore(flags);
242 }
243
244 static void bfin_sec_disable_ssi(unsigned int sid)
245 {
246         unsigned long flags = hard_local_irq_save();
247         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
248
249         reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
250         bfin_write_SEC_SCTL(sid, reg_sctl);
251
252         hard_local_irq_restore(flags);
253 }
254
255 static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
256 {
257         unsigned long flags = hard_local_irq_save();
258         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
259
260         reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
261         bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
262
263         hard_local_irq_restore(flags);
264 }
265
266 static void bfin_sec_enable_sci(unsigned int sid)
267 {
268         unsigned long flags = hard_local_irq_save();
269         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
270
271         if (sid == SIC_SYSIRQ(IRQ_WATCH0))
272                 reg_sctl |= SEC_SCTL_FAULT_EN;
273         else
274                 reg_sctl |= SEC_SCTL_INT_EN;
275         bfin_write_SEC_SCTL(sid, reg_sctl);
276
277         hard_local_irq_restore(flags);
278 }
279
280 static void bfin_sec_disable_sci(unsigned int sid)
281 {
282         unsigned long flags = hard_local_irq_save();
283         uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
284
285         reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
286         bfin_write_SEC_SCTL(sid, reg_sctl);
287
288         hard_local_irq_restore(flags);
289 }
290
291 static void bfin_sec_enable(struct irq_data *d)
292 {
293         unsigned long flags = hard_local_irq_save();
294         unsigned int sid = SIC_SYSIRQ(d->irq);
295
296         bfin_sec_enable_sci(sid);
297         bfin_sec_enable_ssi(sid);
298
299         hard_local_irq_restore(flags);
300 }
301
302 static void bfin_sec_disable(struct irq_data *d)
303 {
304         unsigned long flags = hard_local_irq_save();
305         unsigned int sid = SIC_SYSIRQ(d->irq);
306
307         bfin_sec_disable_sci(sid);
308         bfin_sec_disable_ssi(sid);
309
310         hard_local_irq_restore(flags);
311 }
312
313 static void bfin_sec_raise_irq(unsigned int sid)
314 {
315         unsigned long flags = hard_local_irq_save();
316
317         bfin_write32(SEC_RAISE, sid);
318
319         hard_local_irq_restore(flags);
320 }
321
322 static void init_software_driven_irq(void)
323 {
324         bfin_sec_set_ssi_coreid(34, 0);
325         bfin_sec_set_ssi_coreid(35, 1);
326         bfin_sec_set_ssi_coreid(36, 0);
327         bfin_sec_set_ssi_coreid(37, 1);
328 }
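
/*
 * Illustrative sketch (not part of the original file): once a software-driven
 * source has been routed to a core with bfin_sec_set_ssi_coreid() above, it
 * can be asserted by writing its source ID to SEC_RAISE.  The SID used below
 * (35, routed to core 1 above) is only an example.
 */
#if 0
static void example_raise_soft_irq_to_core1(void)
{
        bfin_sec_raise_irq(35);         /* assert software-driven source 35 */
}
#endif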
329
330 void bfin_sec_resume(void)
331 {
332         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
333         udelay(100);
334         bfin_write_SEC_GCTL(SEC_GCTL_EN);
335         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
336 }
337
338 void handle_sec_sfi_fault(uint32_t gstat)
339 {
340
341 }
342
343 void handle_sec_sci_fault(uint32_t gstat)
344 {
345         uint32_t core_id;
346         uint32_t cstat;
347
348         core_id = gstat & SEC_GSTAT_SCI;
349         cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
350         if (cstat & SEC_CSTAT_ERR) {
351                 switch (cstat & SEC_CSTAT_ERRC) {
352                 case SEC_CSTAT_ACKERR:
353                         printk(KERN_DEBUG "sec ack err\n");
354                         break;
355                 default:
356                         printk(KERN_DEBUG "sec sci unknown err\n");
357                 }
358         }
359
360 }
361
362 void handle_sec_ssi_fault(uint32_t gstat)
363 {
364         uint32_t sid;
365         uint32_t sstat;
366
367         sid = gstat & SEC_GSTAT_SID;
368         sstat = bfin_read_SEC_SSTAT(sid);
369
370 }
371
372 void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
373 {
374         uint32_t sec_gstat;
375
376         raw_spin_lock(&desc->lock);
377
378         sec_gstat = bfin_read32(SEC_GSTAT);
379         if (sec_gstat & SEC_GSTAT_ERR) {
380
381                 switch (sec_gstat & SEC_GSTAT_ERRC) {
382                 case 0:
383                         handle_sec_sfi_fault(sec_gstat);
384                         break;
385                 case SEC_GSTAT_SCIERR:
386                         handle_sec_sci_fault(sec_gstat);
387                         break;
388                 case SEC_GSTAT_SSIERR:
389                         handle_sec_ssi_fault(sec_gstat);
390                         break;
391                 }
392
393
394         }
395
396         raw_spin_unlock(&desc->lock);
397 }
398
399 static int sec_suspend(void)
400 {
401         return 0;
402 }
403
404 static void sec_resume(void)
405 {
406         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
407         udelay(100);
408         bfin_write_SEC_GCTL(SEC_GCTL_EN);
409         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
410 }
411
412 static struct syscore_ops sec_pm_syscore_ops = {
413         .suspend = sec_suspend,
414         .resume = sec_resume,
415 };
416
417 #endif
418
419 #ifdef CONFIG_SMP
420 static void bfin_internal_unmask_irq_chip(struct irq_data *d)
421 {
422         bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
423 }
424
425 static int bfin_internal_set_affinity(struct irq_data *d,
426                                       const struct cpumask *mask, bool force)
427 {
428         bfin_internal_mask_irq(d->irq);
429         bfin_internal_unmask_irq_affinity(d->irq, mask);
430
431         return 0;
432 }
433 #else
434 static void bfin_internal_unmask_irq_chip(struct irq_data *d)
435 {
436         bfin_internal_unmask_irq(d->irq);
437 }
438 #endif
439
440 #ifdef CONFIG_PM
441 int bfin_internal_set_wake(unsigned int irq, unsigned int state)
442 {
443         u32 bank, bit, wakeup = 0;
444         unsigned long flags;
445         bank = SIC_SYSIRQ(irq) / 32;
446         bit = SIC_SYSIRQ(irq) % 32;
447
448         switch (irq) {
449 #ifdef IRQ_RTC
450         case IRQ_RTC:
451         wakeup |= WAKE;
452         break;
453 #endif
454 #ifdef IRQ_CAN0_RX
455         case IRQ_CAN0_RX:
456         wakeup |= CANWE;
457         break;
458 #endif
459 #ifdef IRQ_CAN1_RX
460         case IRQ_CAN1_RX:
461         wakeup |= CANWE;
462         break;
463 #endif
464 #ifdef IRQ_USB_INT0
465         case IRQ_USB_INT0:
466         wakeup |= USBWE;
467         break;
468 #endif
469 #ifdef CONFIG_BF54x
470         case IRQ_CNT:
471         wakeup |= ROTWE;
472         break;
473 #endif
474         default:
475         break;
476         }
477
478         flags = hard_local_irq_save();
479
480         if (state) {
481                 bfin_sic_iwr[bank] |= (1 << bit);
482                 vr_wakeup  |= wakeup;
483
484         } else {
485                 bfin_sic_iwr[bank] &= ~(1 << bit);
486                 vr_wakeup  &= ~wakeup;
487         }
488
489         hard_local_irq_restore(flags);
490
491         return 0;
492 }
493
494 static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
495 {
496         return bfin_internal_set_wake(d->irq, state);
497 }
498 #else
499 # define bfin_internal_set_wake_chip NULL
500 #endif
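
/*
 * Illustrative sketch (not part of the original file): drivers normally reach
 * bfin_internal_set_wake() through the generic wakeup API rather than calling
 * it directly.  The IRQ number below is hypothetical.
 */
#if 0
static int example_mark_irq_as_wakeup(unsigned int irq)
{
        return enable_irq_wake(irq);    /* ends up in .irq_set_wake above */
}
#endif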
501
502 static struct irq_chip bfin_core_irqchip = {
503         .name = "CORE",
504         .irq_mask = bfin_core_mask_irq,
505         .irq_unmask = bfin_core_unmask_irq,
506 };
507
508 static struct irq_chip bfin_internal_irqchip = {
509         .name = "INTN",
510         .irq_mask = bfin_internal_mask_irq_chip,
511         .irq_unmask = bfin_internal_unmask_irq_chip,
512         .irq_disable = bfin_internal_mask_irq_chip,
513         .irq_enable = bfin_internal_unmask_irq_chip,
514 #ifdef CONFIG_SMP
515         .irq_set_affinity = bfin_internal_set_affinity,
516 #endif
517         .irq_set_wake = bfin_internal_set_wake_chip,
518 };
519
520 #ifdef CONFIG_BF60x
521 static struct irq_chip bfin_sec_irqchip = {
522         .name = "SEC",
523         .irq_mask_ack = bfin_sec_mask_ack_irq,
524         .irq_mask = bfin_sec_mask_ack_irq,
525         .irq_unmask = bfin_sec_unmask_irq,
526         .irq_eoi = bfin_sec_unmask_irq,
527         .irq_disable = bfin_sec_disable,
528         .irq_enable = bfin_sec_enable,
529 };
530 #endif
531
532 void bfin_handle_irq(unsigned irq)
533 {
534 #ifdef CONFIG_IPIPE
535         struct pt_regs regs;    /* Contents not used. */
536         ipipe_trace_irq_entry(irq);
537         __ipipe_handle_irq(irq, &regs);
538         ipipe_trace_irq_exit(irq);
539 #else /* !CONFIG_IPIPE */
540         generic_handle_irq(irq);
541 #endif  /* !CONFIG_IPIPE */
542 }
543
544 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
545 static int mac_stat_int_mask;
546
547 static void bfin_mac_status_ack_irq(unsigned int irq)
548 {
549         switch (irq) {
550         case IRQ_MAC_MMCINT:
551                 bfin_write_EMAC_MMC_TIRQS(
552                         bfin_read_EMAC_MMC_TIRQE() &
553                         bfin_read_EMAC_MMC_TIRQS());
554                 bfin_write_EMAC_MMC_RIRQS(
555                         bfin_read_EMAC_MMC_RIRQE() &
556                         bfin_read_EMAC_MMC_RIRQS());
557                 break;
558         case IRQ_MAC_RXFSINT:
559                 bfin_write_EMAC_RX_STKY(
560                         bfin_read_EMAC_RX_IRQE() &
561                         bfin_read_EMAC_RX_STKY());
562                 break;
563         case IRQ_MAC_TXFSINT:
564                 bfin_write_EMAC_TX_STKY(
565                         bfin_read_EMAC_TX_IRQE() &
566                         bfin_read_EMAC_TX_STKY());
567                 break;
568         case IRQ_MAC_WAKEDET:
569                  bfin_write_EMAC_WKUP_CTL(
570                         bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
571                 break;
572         default:
573                 /* These bits are W1C */
574                 bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
575                 break;
576         }
577 }
578
579 static void bfin_mac_status_mask_irq(struct irq_data *d)
580 {
581         unsigned int irq = d->irq;
582
583         mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
584 #ifdef BF537_FAMILY
585         switch (irq) {
586         case IRQ_MAC_PHYINT:
587                 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
588                 break;
589         default:
590                 break;
591         }
592 #else
593         if (!mac_stat_int_mask)
594                 bfin_internal_mask_irq(IRQ_MAC_ERROR);
595 #endif
596         bfin_mac_status_ack_irq(irq);
597 }
598
599 static void bfin_mac_status_unmask_irq(struct irq_data *d)
600 {
601         unsigned int irq = d->irq;
602
603 #ifdef BF537_FAMILY
604         switch (irq) {
605         case IRQ_MAC_PHYINT:
606                 bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
607                 break;
608         default:
609                 break;
610         }
611 #else
612         if (!mac_stat_int_mask)
613                 bfin_internal_unmask_irq(IRQ_MAC_ERROR);
614 #endif
615         mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
616 }
617
618 #ifdef CONFIG_PM
619 int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
620 {
621 #ifdef BF537_FAMILY
622         return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
623 #else
624         return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
625 #endif
626 }
627 #else
628 # define bfin_mac_status_set_wake NULL
629 #endif
630
631 static struct irq_chip bfin_mac_status_irqchip = {
632         .name = "MACST",
633         .irq_mask = bfin_mac_status_mask_irq,
634         .irq_unmask = bfin_mac_status_unmask_irq,
635         .irq_set_wake = bfin_mac_status_set_wake,
636 };
637
638 void bfin_demux_mac_status_irq(unsigned int int_err_irq,
639                                struct irq_desc *inta_desc)
640 {
641         int i, irq = 0;
642         u32 status = bfin_read_EMAC_SYSTAT();
643
644         for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
645                 if (status & (1L << i)) {
646                         irq = IRQ_MAC_PHYINT + i;
647                         break;
648                 }
649
650         if (irq) {
651                 if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
652                         bfin_handle_irq(irq);
653                 } else {
654                         bfin_mac_status_ack_irq(irq);
655                         pr_debug("IRQ %d:"
656                                         " MASKED MAC ERROR INTERRUPT ASSERTED\n",
657                                         irq);
658                 }
659         } else
660                 printk(KERN_ERR
661                                 "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
662                                 " INTERRUPT ASSERTED BUT NO SOURCE FOUND "
663                                 "(EMAC_SYSTAT=0x%X)\n",
664                                 __func__, __FILE__, __LINE__, status);
665 }
666 #endif
667
668 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
669 {
670 #ifdef CONFIG_IPIPE
671         handle = handle_level_irq;
672 #endif
673         __irq_set_handler_locked(irq, handle);
674 }
675
676 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
677 extern void bfin_gpio_irq_prepare(unsigned gpio);
678
679 #if !BFIN_GPIO_PINT
680
681 static void bfin_gpio_ack_irq(struct irq_data *d)
682 {
683         /* AFAIK, when mask_ack is provided, ack_irq only
684          * gets called for edge-sensitive irqs
685          */
686         set_gpio_data(irq_to_gpio(d->irq), 0);
687 }
688
689 static void bfin_gpio_mask_ack_irq(struct irq_data *d)
690 {
691         unsigned int irq = d->irq;
692         u32 gpionr = irq_to_gpio(irq);
693
694         if (!irqd_is_level_type(d))
695                 set_gpio_data(gpionr, 0);
696
697         set_gpio_maska(gpionr, 0);
698 }
699
700 static void bfin_gpio_mask_irq(struct irq_data *d)
701 {
702         set_gpio_maska(irq_to_gpio(d->irq), 0);
703 }
704
705 static void bfin_gpio_unmask_irq(struct irq_data *d)
706 {
707         set_gpio_maska(irq_to_gpio(d->irq), 1);
708 }
709
710 static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
711 {
712         u32 gpionr = irq_to_gpio(d->irq);
713
714         if (__test_and_set_bit(gpionr, gpio_enabled))
715                 bfin_gpio_irq_prepare(gpionr);
716
717         bfin_gpio_unmask_irq(d);
718
719         return 0;
720 }
721
722 static void bfin_gpio_irq_shutdown(struct irq_data *d)
723 {
724         u32 gpionr = irq_to_gpio(d->irq);
725
726         bfin_gpio_mask_irq(d);
727         __clear_bit(gpionr, gpio_enabled);
728         bfin_gpio_irq_free(gpionr);
729 }
730
731 static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
732 {
733         unsigned int irq = d->irq;
734         int ret;
735         char buf[16];
736         u32 gpionr = irq_to_gpio(irq);
737
738         if (type == IRQ_TYPE_PROBE) {
739                 /* only probe unenabled GPIO interrupt lines */
740                 if (test_bit(gpionr, gpio_enabled))
741                         return 0;
742                 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
743         }
744
745         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
746                     IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
747
748                 snprintf(buf, 16, "gpio-irq%d", irq);
749                 ret = bfin_gpio_irq_request(gpionr, buf);
750                 if (ret)
751                         return ret;
752
753                 if (__test_and_set_bit(gpionr, gpio_enabled))
754                         bfin_gpio_irq_prepare(gpionr);
755
756         } else {
757                 __clear_bit(gpionr, gpio_enabled);
758                 return 0;
759         }
760
761         set_gpio_inen(gpionr, 0);
762         set_gpio_dir(gpionr, 0);
763
764         if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
765             == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
766                 set_gpio_both(gpionr, 1);
767         else
768                 set_gpio_both(gpionr, 0);
769
770         if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
771                 set_gpio_polar(gpionr, 1);      /* low or falling edge denoted by one */
772         else
773                 set_gpio_polar(gpionr, 0);      /* high or rising edge denoted by zero */
774
775         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
776                 set_gpio_edge(gpionr, 1);
777                 set_gpio_inen(gpionr, 1);
778                 set_gpio_data(gpionr, 0);
779
780         } else {
781                 set_gpio_edge(gpionr, 0);
782                 set_gpio_inen(gpionr, 1);
783         }
784
785         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
786                 bfin_set_irq_handler(irq, handle_edge_irq);
787         else
788                 bfin_set_irq_handler(irq, handle_level_irq);
789
790         return 0;
791 }
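
/*
 * Illustrative sketch (not part of the original file): from a driver's point
 * of view the trigger programming above is normally selected with
 * request_irq() flags.  The GPIO, handler and name below are hypothetical.
 */
#if 0
static irqreturn_t example_gpio_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int example_request_falling_edge_irq(unsigned gpio)
{
        return request_irq(gpio_to_irq(gpio), example_gpio_isr,
                           IRQF_TRIGGER_FALLING, "example-gpio", NULL);
}
#endif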
792
793 #ifdef CONFIG_PM
794 static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
795 {
796         return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
797 }
798 #else
799 # define bfin_gpio_set_wake NULL
800 #endif
801
802 static void bfin_demux_gpio_block(unsigned int irq)
803 {
804         unsigned int gpio, mask;
805
806         gpio = irq_to_gpio(irq);
807         mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
808
809         while (mask) {
810                 if (mask & 1)
811                         bfin_handle_irq(irq);
812                 irq++;
813                 mask >>= 1;
814         }
815 }
816
817 void bfin_demux_gpio_irq(unsigned int inta_irq,
818                         struct irq_desc *desc)
819 {
820         unsigned int irq;
821
822         switch (inta_irq) {
823 #if defined(BF537_FAMILY)
824         case IRQ_PF_INTA_PG_INTA:
825                 bfin_demux_gpio_block(IRQ_PF0);
826                 irq = IRQ_PG0;
827                 break;
828         case IRQ_PH_INTA_MAC_RX:
829                 irq = IRQ_PH0;
830                 break;
831 #elif defined(BF533_FAMILY)
832         case IRQ_PROG_INTA:
833                 irq = IRQ_PF0;
834                 break;
835 #elif defined(BF538_FAMILY)
836         case IRQ_PORTF_INTA:
837                 irq = IRQ_PF0;
838                 break;
839 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
840         case IRQ_PORTF_INTA:
841                 irq = IRQ_PF0;
842                 break;
843         case IRQ_PORTG_INTA:
844                 irq = IRQ_PG0;
845                 break;
846         case IRQ_PORTH_INTA:
847                 irq = IRQ_PH0;
848                 break;
849 #elif defined(CONFIG_BF561)
850         case IRQ_PROG0_INTA:
851                 irq = IRQ_PF0;
852                 break;
853         case IRQ_PROG1_INTA:
854                 irq = IRQ_PF16;
855                 break;
856         case IRQ_PROG2_INTA:
857                 irq = IRQ_PF32;
858                 break;
859 #endif
860         default:
861                 BUG();
862                 return;
863         }
864
865         bfin_demux_gpio_block(irq);
866 }
867
868 #else
869
870 # ifndef CONFIG_BF60x
871 #define NR_PINT_SYS_IRQS        4
872 #define NR_PINTS                160
873 # else
874 #define NR_PINT_SYS_IRQS        6
875 #define NR_PINTS                112
876 #endif
877
878 #define NR_PINT_BITS            32
879 #define IRQ_NOT_AVAIL           0xFF
880
881 #define PINT_2_BANK(x)          ((x) >> 5)
882 #define PINT_2_BIT(x)           ((x) & 0x1F)
883 #define PINT_BIT(x)             (1 << (PINT_2_BIT(x)))
884
885 static unsigned char irq2pint_lut[NR_PINTS];
886 static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
887
888 static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
889         (struct bfin_pint_regs *)PINT0_MASK_SET,
890         (struct bfin_pint_regs *)PINT1_MASK_SET,
891         (struct bfin_pint_regs *)PINT2_MASK_SET,
892         (struct bfin_pint_regs *)PINT3_MASK_SET,
893 #ifdef CONFIG_BF60x
894         (struct bfin_pint_regs *)PINT4_MASK_SET,
895         (struct bfin_pint_regs *)PINT5_MASK_SET,
896 #endif
897 };
898
899 #ifndef CONFIG_BF60x
900 inline unsigned int get_irq_base(u32 bank, u8 bmap)
901 {
902         unsigned int irq_base;
903
904         if (bank < 2) {         /*PA-PB */
905                 irq_base = IRQ_PA0 + bmap * 16;
906         } else {                /*PC-PJ */
907                 irq_base = IRQ_PC0 + bmap * 16;
908         }
909
910         return irq_base;
911 }
912 #else
913 inline unsigned int get_irq_base(u32 bank, u8 bmap)
914 {
915         unsigned int irq_base;
916
917         irq_base = IRQ_PA0 + bank * 16 + bmap * 16;
918
919         return irq_base;
920 }
921 #endif
922
923 /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
924 void init_pint_lut(void)
925 {
926         u16 bank, bit, irq_base, bit_pos;
927         u32 pint_assign;
928         u8 bmap;
929
930         memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));
931
932         for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
933
934                 pint_assign = pint[bank]->assign;
935
936                 for (bit = 0; bit < NR_PINT_BITS; bit++) {
937
938                         bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
939
940                         irq_base = get_irq_base(bank, bmap);
941
942                         irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
943                         bit_pos = bit + bank * NR_PINT_BITS;
944
945                         pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
946                         irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
947                 }
948         }
949 }
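
/*
 * Worked example (illustrative, not part of the original file): each PINT
 * bank is 32 bits wide, so a pint_val of 37 decodes as PINT_2_BANK(37) == 1
 * and PINT_2_BIT(37) == 5, i.e. bit 5 of PINT1, and PINT_BIT(37) is the
 * matching mask (1 << 5).
 */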
950
951 static void bfin_gpio_ack_irq(struct irq_data *d)
952 {
953         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
954         u32 pintbit = PINT_BIT(pint_val);
955         u32 bank = PINT_2_BANK(pint_val);
956
957         if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
958                 if (pint[bank]->invert_set & pintbit)
959                         pint[bank]->invert_clear = pintbit;
960                 else
961                         pint[bank]->invert_set = pintbit;
962         }
963         pint[bank]->request = pintbit;
964
965 }
966
967 static void bfin_gpio_mask_ack_irq(struct irq_data *d)
968 {
969         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
970         u32 pintbit = PINT_BIT(pint_val);
971         u32 bank = PINT_2_BANK(pint_val);
972
973         if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
974                 if (pint[bank]->invert_set & pintbit)
975                         pint[bank]->invert_clear = pintbit;
976                 else
977                         pint[bank]->invert_set = pintbit;
978         }
979
980         pint[bank]->request = pintbit;
981         pint[bank]->mask_clear = pintbit;
982 }
983
984 static void bfin_gpio_mask_irq(struct irq_data *d)
985 {
986         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
987
988         pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
989 }
990
991 static void bfin_gpio_unmask_irq(struct irq_data *d)
992 {
993         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
994         u32 pintbit = PINT_BIT(pint_val);
995         u32 bank = PINT_2_BANK(pint_val);
996
997         pint[bank]->mask_set = pintbit;
998 }
999
1000 static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
1001 {
1002         unsigned int irq = d->irq;
1003         u32 gpionr = irq_to_gpio(irq);
1004         u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
1005
1006         if (pint_val == IRQ_NOT_AVAIL) {
1007                 printk(KERN_ERR
1008                 "GPIO IRQ %d: Not in PINT Assign table. "
1009                 "Reconfigure Interrupt to Port Assignment\n", irq);
1010                 return -ENODEV;
1011         }
1012
1013         if (__test_and_set_bit(gpionr, gpio_enabled))
1014                 bfin_gpio_irq_prepare(gpionr);
1015
1016         bfin_gpio_unmask_irq(d);
1017
1018         return 0;
1019 }
1020
1021 static void bfin_gpio_irq_shutdown(struct irq_data *d)
1022 {
1023         u32 gpionr = irq_to_gpio(d->irq);
1024
1025         bfin_gpio_mask_irq(d);
1026         __clear_bit(gpionr, gpio_enabled);
1027         bfin_gpio_irq_free(gpionr);
1028 }
1029
1030 static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
1031 {
1032         unsigned int irq = d->irq;
1033         int ret;
1034         char buf[16];
1035         u32 gpionr = irq_to_gpio(irq);
1036         u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
1037         u32 pintbit = PINT_BIT(pint_val);
1038         u32 bank = PINT_2_BANK(pint_val);
1039
1040         if (pint_val == IRQ_NOT_AVAIL)
1041                 return -ENODEV;
1042
1043         if (type == IRQ_TYPE_PROBE) {
1044                 /* only probe unenabled GPIO interrupt lines */
1045                 if (test_bit(gpionr, gpio_enabled))
1046                         return 0;
1047                 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
1048         }
1049
1050         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
1051                     IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
1052
1053                 snprintf(buf, 16, "gpio-irq%d", irq);
1054                 ret = bfin_gpio_irq_request(gpionr, buf);
1055                 if (ret)
1056                         return ret;
1057
1058                 if (__test_and_set_bit(gpionr, gpio_enabled))
1059                         bfin_gpio_irq_prepare(gpionr);
1060
1061         } else {
1062                 __clear_bit(gpionr, gpio_enabled);
1063                 return 0;
1064         }
1065
1066         if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
1067                 pint[bank]->invert_set = pintbit;       /* low or falling edge denoted by one */
1068         else
1069                 pint[bank]->invert_clear = pintbit;     /* high or rising edge denoted by zero */
1070
1071         if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
1072             == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
1073                 if (gpio_get_value(gpionr))
1074                         pint[bank]->invert_set = pintbit;
1075                 else
1076                         pint[bank]->invert_clear = pintbit;
1077         }
1078
1079         if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
1080                 pint[bank]->edge_set = pintbit;
1081                 bfin_set_irq_handler(irq, handle_edge_irq);
1082         } else {
1083                 pint[bank]->edge_clear = pintbit;
1084                 bfin_set_irq_handler(irq, handle_level_irq);
1085         }
1086
1087         return 0;
1088 }
1089
1090 #ifdef CONFIG_PM
1091 static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
1092 {
1093         u32 pint_irq;
1094         u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
1095         u32 bank = PINT_2_BANK(pint_val);
1096
1097         switch (bank) {
1098         case 0:
1099                 pint_irq = IRQ_PINT0;
1100                 break;
1101         case 2:
1102                 pint_irq = IRQ_PINT2;
1103                 break;
1104         case 3:
1105                 pint_irq = IRQ_PINT3;
1106                 break;
1107         case 1:
1108                 pint_irq = IRQ_PINT1;
1109                 break;
1110         case 4:
1111                 pint_irq = IRQ_PINT4;
1112                 break;
1113         case 5:
1114                 pint_irq = IRQ_PINT5;
1115                 break;
1116         default:
1117                 return -EINVAL;
1118         }
1119
1120         bfin_internal_set_wake(pint_irq, state);
1121
1122         return 0;
1123 }
1124 #else
1125 # define bfin_gpio_set_wake NULL
1126 #endif
1127
1128 void bfin_demux_gpio_irq(unsigned int inta_irq,
1129                         struct irq_desc *desc)
1130 {
1131         u32 bank, pint_val;
1132         u32 request, irq;
1133         u32 level_mask;
1134         int umask = 0;
1135         struct irq_chip *chip = irq_desc_get_chip(desc);
1136
1137         if (chip->irq_mask_ack) {
1138                 chip->irq_mask_ack(&desc->irq_data);
1139         } else {
1140                 chip->irq_mask(&desc->irq_data);
1141                 if (chip->irq_ack)
1142                         chip->irq_ack(&desc->irq_data);
1143         }
1144
1145         switch (inta_irq) {
1146         case IRQ_PINT0:
1147                 bank = 0;
1148                 break;
1149         case IRQ_PINT2:
1150                 bank = 2;
1151                 break;
1152         case IRQ_PINT3:
1153                 bank = 3;
1154                 break;
1155         case IRQ_PINT1:
1156                 bank = 1;
1157                 break;
1158 #ifdef CONFIG_BF60x
1159         case IRQ_PINT4:
1160                 bank = 4;
1161                 break;
1162         case IRQ_PINT5:
1163                 bank = 5;
1164                 break;
1165 #endif
1166         default:
1167                 return;
1168         }
1169
1170         pint_val = bank * NR_PINT_BITS;
1171
1172         request = pint[bank]->request;
1173
1174         level_mask = pint[bank]->edge_set & request;
1175
1176         while (request) {
1177                 if (request & 1) {
1178                         irq = pint2irq_lut[pint_val] + SYS_IRQS;
1179                         if (level_mask & PINT_BIT(pint_val)) {
1180                                 umask = 1;
1181                                 chip->irq_unmask(&desc->irq_data);
1182                         }
1183                         bfin_handle_irq(irq);
1184                 }
1185                 pint_val++;
1186                 request >>= 1;
1187         }
1188
1189         if (!umask)
1190                 chip->irq_unmask(&desc->irq_data);
1191 }
1192 #endif
1193
1194 static struct irq_chip bfin_gpio_irqchip = {
1195         .name = "GPIO",
1196         .irq_ack = bfin_gpio_ack_irq,
1197         .irq_mask = bfin_gpio_mask_irq,
1198         .irq_mask_ack = bfin_gpio_mask_ack_irq,
1199         .irq_unmask = bfin_gpio_unmask_irq,
1200         .irq_disable = bfin_gpio_mask_irq,
1201         .irq_enable = bfin_gpio_unmask_irq,
1202         .irq_set_type = bfin_gpio_irq_type,
1203         .irq_startup = bfin_gpio_irq_startup,
1204         .irq_shutdown = bfin_gpio_irq_shutdown,
1205         .irq_set_wake = bfin_gpio_set_wake,
1206 };
1207
1208 void __cpuinit init_exception_vectors(void)
1209 {
1210         /* cannot program in software:
1211          * evt0 - emulation (jtag)
1212          * evt1 - reset
1213          */
1214         bfin_write_EVT2(evt_nmi);
1215         bfin_write_EVT3(trap);
1216         bfin_write_EVT5(evt_ivhw);
1217         bfin_write_EVT6(evt_timer);
1218         bfin_write_EVT7(evt_evt7);
1219         bfin_write_EVT8(evt_evt8);
1220         bfin_write_EVT9(evt_evt9);
1221         bfin_write_EVT10(evt_evt10);
1222         bfin_write_EVT11(evt_evt11);
1223         bfin_write_EVT12(evt_evt12);
1224         bfin_write_EVT13(evt_evt13);
1225         bfin_write_EVT14(evt_evt14);
1226         bfin_write_EVT15(evt_system_call);
1227         CSYNC();
1228 }
1229
1230 /*
1231  * This function should be called during kernel startup to initialize
1232  * the BFin IRQ handling routines.
1233  */
1234
1235 int __init init_arch_irq(void)
1236 {
1237         int irq;
1238         unsigned long ilat = 0;
1239
1240 #ifndef CONFIG_BF60x
1241         /*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
1242 #ifdef SIC_IMASK0
1243         bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
1244         bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
1245 # ifdef SIC_IMASK2
1246         bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
1247 # endif
1248 # if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1249         bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
1250         bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
1251 # endif
1252 #else
1253         bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
1254 #endif
1255 #else /* CONFIG_BF60x */
1256         bfin_write_SEC_GCTL(SEC_GCTL_RESET);
1257 #endif
1258
1259         local_irq_disable();
1260
1261 #if BFIN_GPIO_PINT
1262 # ifdef CONFIG_PINTx_REASSIGN
1263         pint[0]->assign = CONFIG_PINT0_ASSIGN;
1264         pint[1]->assign = CONFIG_PINT1_ASSIGN;
1265         pint[2]->assign = CONFIG_PINT2_ASSIGN;
1266         pint[3]->assign = CONFIG_PINT3_ASSIGN;
1267 # ifdef CONFIG_BF60x
1268         pint[4]->assign = CONFIG_PINT4_ASSIGN;
1269         pint[5]->assign = CONFIG_PINT5_ASSIGN;
1270 # endif
1271 # endif
1272         /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
1273         init_pint_lut();
1274 #endif
1275
1276         for (irq = 0; irq <= SYS_IRQS; irq++) {
1277                 if (irq <= IRQ_CORETMR)
1278                         irq_set_chip(irq, &bfin_core_irqchip);
1279                 else
1280                         irq_set_chip(irq, &bfin_internal_irqchip);
1281
1282                 switch (irq) {
1283 #ifndef CONFIG_BF60x
1284 #if BFIN_GPIO_PINT
1285                 case IRQ_PINT0:
1286                 case IRQ_PINT1:
1287                 case IRQ_PINT2:
1288                 case IRQ_PINT3:
1289 #elif defined(BF537_FAMILY)
1290                 case IRQ_PH_INTA_MAC_RX:
1291                 case IRQ_PF_INTA_PG_INTA:
1292 #elif defined(BF533_FAMILY)
1293                 case IRQ_PROG_INTA:
1294 #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1295                 case IRQ_PORTF_INTA:
1296                 case IRQ_PORTG_INTA:
1297                 case IRQ_PORTH_INTA:
1298 #elif defined(CONFIG_BF561)
1299                 case IRQ_PROG0_INTA:
1300                 case IRQ_PROG1_INTA:
1301                 case IRQ_PROG2_INTA:
1302 #elif defined(BF538_FAMILY)
1303                 case IRQ_PORTF_INTA:
1304 #endif
1305                         irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1306                         break;
1307 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1308                 case IRQ_MAC_ERROR:
1309                         irq_set_chained_handler(irq,
1310                                                 bfin_demux_mac_status_irq);
1311                         break;
1312 #endif
1313 #if defined(CONFIG_SMP) || defined(CONFIG_ICC)
1314                 case IRQ_SUPPLE_0:
1315                 case IRQ_SUPPLE_1:
1316                         irq_set_handler(irq, handle_percpu_irq);
1317                         break;
1318 #endif
1319 #endif
1320
1321 #ifdef CONFIG_TICKSOURCE_CORETMR
1322                 case IRQ_CORETMR:
1323 # ifdef CONFIG_SMP
1324                         irq_set_handler(irq, handle_percpu_irq);
1325 # else
1326                         irq_set_handler(irq, handle_simple_irq);
1327 # endif
1328                         break;
1329 #endif
1330
1331 #ifdef CONFIG_TICKSOURCE_GPTMR0
1332                 case IRQ_TIMER0:
1333                         irq_set_handler(irq, handle_simple_irq);
1334                         break;
1335 #endif
1336
1337                 default:
1338 #ifdef CONFIG_IPIPE
1339                         irq_set_handler(irq, handle_level_irq);
1340 #else
1341                         irq_set_handler(irq, handle_simple_irq);
1342 #endif
1343                         break;
1344                 }
1345         }
1346
1347         init_mach_irq();
1348
1349 #ifndef CONFIG_BF60x
1350 #if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) && !defined(CONFIG_BF60x)
1351         for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1352                 irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
1353                                          handle_level_irq);
1354 #endif
1355         /* if configured as edge, the handler will be changed to handle_edge_irq */
1356         for (irq = GPIO_IRQ_BASE;
1357                 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1358                 irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1359                                          handle_level_irq);
1360 #else
1361         for (irq = BFIN_IRQ(0); irq <= SYS_IRQS; irq++) {
1362                 if (irq < CORE_IRQS) {
1363                         irq_set_chip(irq, &bfin_sec_irqchip);
1364                         __irq_set_handler(irq, handle_sec_fault, 0, NULL);
1365                 } else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
1366                         irq_set_chip(irq, &bfin_sec_irqchip);
1367                         irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1368                 } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
1369                         irq_set_chip(irq, &bfin_sec_irqchip);
1370                         irq_set_handler(irq, handle_percpu_irq);
1371                 } else {
1372                         irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
1373                                         handle_fasteoi_irq);
1374                         __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
1375                 }
1376         }
1377         for (irq = GPIO_IRQ_BASE;
1378                 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1379                 irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1380                                         handle_level_irq);
1381 #endif
1382         bfin_write_IMASK(0);
1383         CSYNC();
1384         ilat = bfin_read_ILAT();
1385         CSYNC();
1386         bfin_write_ILAT(ilat);
1387         CSYNC();
1388
1389         printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1390         /* Writing IMASK = xxx is equivalent to "STI xxx", i.e. setting
1391          * bfin_irq_flags = xxx and then calling local_irq_enable()
1392          */
1393 #ifndef CONFIG_BF60x
1394         program_IAR();
1395         /* Therefore it's better to set up the IARs before interrupts are enabled */
1396         search_IAR();
1397
1398         /* Enable interrupts IVG7-15 */
1399         bfin_irq_flags |= IMASK_IVG15 |
1400                 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1401                 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1402
1403         bfin_sti(bfin_irq_flags);
1404
1405         /* This implicitly covers ANOMALY_05000171
1406          * Boot-ROM code modifies SICA_IWRx wakeup registers
1407          */
1408 #ifdef SIC_IWR0
1409         bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1410 # ifdef SIC_IWR1
1411         /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1412          * will screw up the bootrom as it relies on MDMA0/1 waking it
1413          * up from IDLE instructions.  See this report for more info:
1414          * http://blackfin.uclinux.org/gf/tracker/4323
1415          */
1416         if (ANOMALY_05000435)
1417                 bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1418         else
1419                 bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1420 # endif
1421 # ifdef SIC_IWR2
1422         bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
1423 # endif
1424 #else
1425         bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1426 #endif
1427 #else  /* CONFIG_BF60x */
1428         /* Enable interrupts IVG7-15 */
1429         bfin_irq_flags |= IMASK_IVG15 |
1430             IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1431             IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1432
1433
1434         bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
1435         bfin_sec_enable_sci(SIC_SYSIRQ(IRQ_WATCH0));
1436         bfin_sec_enable_ssi(SIC_SYSIRQ(IRQ_WATCH0));
1437         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
1438         udelay(100);
1439         bfin_write_SEC_GCTL(SEC_GCTL_EN);
1440         bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
1441         init_software_driven_irq();
1442         register_syscore_ops(&sec_pm_syscore_ops);
1443 #endif
1444         return 0;
1445 }
1446
1447 #ifdef CONFIG_DO_IRQ_L1
1448 __attribute__((l1_text))
1449 #endif
1450 static int vec_to_irq(int vec)
1451 {
1452 #ifndef CONFIG_BF60x
1453         struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1454         struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1455         unsigned long sic_status[3];
1456 #endif
1457         if (likely(vec == EVT_IVTMR_P))
1458                 return IRQ_CORETMR;
1459 #ifndef CONFIG_BF60x
1460 #ifdef SIC_ISR
1461         sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1462 #else
1463         if (smp_processor_id()) {
1464 # ifdef SICB_ISR0
1465                 /* This will be optimized out in UP mode. */
1466                 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1467                 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1468 # endif
1469         } else {
1470                 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1471                 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1472         }
1473 #endif
1474 #ifdef SIC_ISR2
1475         sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1476 #endif
1477
1478         for (;; ivg++) {
1479                 if (ivg >= ivg_stop)
1480                         return -1;
1481 #ifdef SIC_ISR
1482                 if (sic_status[0] & ivg->isrflag)
1483 #else
1484                 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1485 #endif
1486                         return ivg->irqno;
1487         }
1488 #else
1489         /* For BF60x, the SEC reports the pending source ID directly */
1490         return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
1491 #endif  /* end of CONFIG_BF60x */
1492 }
1493
1494 #ifdef CONFIG_DO_IRQ_L1
1495 __attribute__((l1_text))
1496 #endif
1497 void do_irq(int vec, struct pt_regs *fp)
1498 {
1499         int irq = vec_to_irq(vec);
1500         if (irq == -1)
1501                 return;
1502         asm_do_IRQ(irq, fp);
1503 }
1504
1505 #ifdef CONFIG_IPIPE
1506
1507 int __ipipe_get_irq_priority(unsigned irq)
1508 {
1509         int ient, prio;
1510
1511         if (irq <= IRQ_CORETMR)
1512                 return irq;
1513
1514         for (ient = 0; ient < NR_PERI_INTS; ient++) {
1515                 struct ivgx *ivg = ivg_table + ient;
1516                 if (ivg->irqno == irq) {
1517                         for (prio = 0; prio <= IVG13-IVG7; prio++) {
1518                                 if (ivg7_13[prio].ifirst <= ivg &&
1519                                     ivg7_13[prio].istop > ivg)
1520                                         return IVG7 + prio;
1521                         }
1522                 }
1523         }
1524
1525         return IVG15;
1526 }
1527
1528 /* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1529 #ifdef CONFIG_DO_IRQ_L1
1530 __attribute__((l1_text))
1531 #endif
1532 asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1533 {
1534         struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
1535         struct ipipe_domain *this_domain = __ipipe_current_domain;
1536         struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
1537         struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1538         int irq, s = 0;
1539
1540         irq = vec_to_irq(vec);
1541         if (irq == -1)
1542                 return 0;
1543
1544         if (irq == IRQ_SYSTMR) {
1545 #if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
1546                 bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
1547 #endif
1548                 /* This is basically what we need from the register frame. */
1549                 __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
1550                 __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
1551                 if (this_domain != ipipe_root_domain)
1552                         __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
1553                 else
1554                         __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1555         }
1556
1557         /*
1558          * We don't want Linux interrupt handlers to run at the
1559          * current core priority level (i.e. < EVT15), since this
1560          * might delay other interrupts handled by a high priority
1561          * domain. Here is what we do instead:
1562          *
1563          * - we raise the SYNCDEFER bit to prevent
1564          * __ipipe_handle_irq() to sync the pipeline for the root
1565          * stage for the incoming interrupt. Upon return, that IRQ is
1566          * pending in the interrupt log.
1567          *
1568          * - we raise the TIF_IRQ_SYNC bit for the current thread, so
1569          * that _schedule_and_signal_from_int will eventually sync the
1570          * pipeline from EVT15.
1571          */
1572         if (this_domain == ipipe_root_domain) {
1573                 s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1574                 barrier();
1575         }
1576
1577         ipipe_trace_irq_entry(irq);
1578         __ipipe_handle_irq(irq, regs);
1579         ipipe_trace_irq_exit(irq);
1580
1581         if (user_mode(regs) &&
1582             !ipipe_test_foreign_stack() &&
1583             (current->ipipe_flags & PF_EVTRET) != 0) {
1584                 /*
1585                  * Testing for user_mode() does NOT fully eliminate
1586                  * foreign stack contexts, because of the forged
1587                  * interrupt returns we do through
1588                  * __ipipe_call_irqtail. In that case, we might have
1589                  * preempted a foreign stack context in a high
1590                  * priority domain, with a single interrupt level now
1591                  * pending after the irqtail unwinding is done. In
1592                  * which case user_mode() is now true, and the event
1593                  * gets dispatched spuriously.
1594                  */
1595                 current->ipipe_flags &= ~PF_EVTRET;
1596                 __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
1597         }
1598
1599         if (this_domain == ipipe_root_domain) {
1600                 set_thread_flag(TIF_IRQ_SYNC);
1601                 if (!s) {
1602                         __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1603                         return !test_bit(IPIPE_STALL_FLAG, &p->status);
1604                 }
1605         }
1606
1607         return 0;
1608 }
1609
1610 #endif /* CONFIG_IPIPE */