1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006 Thomas Gleixner
5  *
6  * This file contains driver APIs to the irq subsystem.
7  */
8
9 #define pr_fmt(fmt) "genirq: " fmt
10
11 #include <linux/irq.h>
12 #include <linux/kthread.h>
13 #include <linux/module.h>
14 #include <linux/random.h>
15 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/sched.h>
18 #include <linux/sched/rt.h>
19 #include <linux/sched/task.h>
20 #include <uapi/linux/sched/types.h>
21 #include <linux/task_work.h>
22
23 #include "internals.h"
24
25 #ifdef CONFIG_IRQ_FORCED_THREADING
26 __read_mostly bool force_irqthreads;
27 EXPORT_SYMBOL_GPL(force_irqthreads);
28
29 static int __init setup_forced_irqthreads(char *arg)
30 {
31         force_irqthreads = true;
32         return 0;
33 }
34 early_param("threadirqs", setup_forced_irqthreads);
35 #endif
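/*
 * Editor's note (usage sketch): forced threading is enabled by booting
 * with "threadirqs" on the kernel command line. Requests flagged
 * IRQF_NO_THREAD, IRQF_PERCPU or IRQF_ONESHOT are left alone by forced
 * threading; see irq_setup_forced_threading() below.
 */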
36
37 static void __synchronize_hardirq(struct irq_desc *desc)
38 {
39         bool inprogress;
40
41         do {
42                 unsigned long flags;
43
44                 /*
45                  * Wait until we're out of the critical section.  This might
46                  * give the wrong answer due to the lack of memory barriers.
47                  */
48                 while (irqd_irq_inprogress(&desc->irq_data))
49                         cpu_relax();
50
51                 /* Ok, that indicated we're done: double-check carefully. */
52                 raw_spin_lock_irqsave(&desc->lock, flags);
53                 inprogress = irqd_irq_inprogress(&desc->irq_data);
54                 raw_spin_unlock_irqrestore(&desc->lock, flags);
55
56                 /* Oops, that failed? */
57         } while (inprogress);
58 }
59
60 /**
61  *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
62  *      @irq: interrupt number to wait for
63  *
64  *      This function waits for any pending hard IRQ handlers for this
65  *      interrupt to complete before returning. If you use this
66  *      function while holding a resource the IRQ handler may need you
67  *      will deadlock. It does not take associated threaded handlers
68  *      into account.
69  *
70  *      Do not use this for shutdown scenarios where you must be sure
71  *      that all parts (hardirq and threaded handler) have completed.
72  *
73  *      Returns: false if a threaded handler is active.
74  *
75  *      This function may be called - with care - from IRQ context.
76  */
77 bool synchronize_hardirq(unsigned int irq)
78 {
79         struct irq_desc *desc = irq_to_desc(irq);
80
81         if (desc) {
82                 __synchronize_hardirq(desc);
83                 return !atomic_read(&desc->threads_active);
84         }
85
86         return true;
87 }
88 EXPORT_SYMBOL(synchronize_hardirq);
89
90 /**
91  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
92  *      @irq: interrupt number to wait for
93  *
94  *      This function waits for any pending IRQ handlers for this interrupt
95  *      to complete before returning. If you use this function while
96  *      holding a resource the IRQ handler may need you will deadlock.
97  *
98  *      This function may be called - with care - from IRQ context.
99  */
100 void synchronize_irq(unsigned int irq)
101 {
102         struct irq_desc *desc = irq_to_desc(irq);
103
104         if (desc) {
105                 __synchronize_hardirq(desc);
106                 /*
107                  * We made sure that no hardirq handler is
108                  * running. Now verify that no threaded handlers are
109                  * active.
110                  */
111                 wait_event(desc->wait_for_threads,
112                            !atomic_read(&desc->threads_active));
113         }
114 }
115 EXPORT_SYMBOL(synchronize_irq);
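/*
 * Editor's sketch (hypothetical driver code, not part of this file): a
 * typical use of synchronize_irq() is to make sure no handler is still
 * running before data it dereferences goes away. All names are made up.
 *
 *	static void foo_teardown(struct foo_dev *dev)
 *	{
 *		dev->shutting_down = true;	// checked by the handler
 *		synchronize_irq(dev->irq);	// wait for in-flight handlers
 *		kfree(dev->dma_ring);		// now safe to free
 *	}
 *
 * As the kernel-doc above warns, do not call this while holding a lock
 * that the handler itself may take.
 */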
116
117 #ifdef CONFIG_SMP
118 cpumask_var_t irq_default_affinity;
119
120 static bool __irq_can_set_affinity(struct irq_desc *desc)
121 {
122         if (!desc || !irqd_can_balance(&desc->irq_data) ||
123             !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
124                 return false;
125         return true;
126 }
127
128 /**
129  *      irq_can_set_affinity - Check if the affinity of a given irq can be set
130  *      @irq:           Interrupt to check
131  *
132  */
133 int irq_can_set_affinity(unsigned int irq)
134 {
135         return __irq_can_set_affinity(irq_to_desc(irq));
136 }
137
138 /**
139  * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
140  * @irq:        Interrupt to check
141  *
142  * Like irq_can_set_affinity() above, but additionally checks for the
143  * AFFINITY_MANAGED flag.
144  */
145 bool irq_can_set_affinity_usr(unsigned int irq)
146 {
147         struct irq_desc *desc = irq_to_desc(irq);
148
149         return __irq_can_set_affinity(desc) &&
150                 !irqd_affinity_is_managed(&desc->irq_data);
151 }
152
153 /**
154  *      irq_set_thread_affinity - Notify irq threads to adjust affinity
155  *      @desc:          irq descriptor whose affinity changed
156  *
157  *      We just set IRQTF_AFFINITY and delegate the affinity setting
158  *      to the interrupt thread itself. We cannot call
159  *      set_cpus_allowed_ptr() here as we hold desc->lock and this
160  *      code can be called from hard interrupt context.
161  */
162 void irq_set_thread_affinity(struct irq_desc *desc)
163 {
164         struct irqaction *action;
165
166         for_each_action_of_desc(desc, action)
167                 if (action->thread)
168                         set_bit(IRQTF_AFFINITY, &action->thread_flags);
169 }
170
171 static void irq_validate_effective_affinity(struct irq_data *data)
172 {
173 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
174         const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
175         struct irq_chip *chip = irq_data_get_irq_chip(data);
176
177         if (!cpumask_empty(m))
178                 return;
179         pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
180                      chip->name, data->irq);
181 #endif
182 }
183
184 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
185                         bool force)
186 {
187         struct irq_desc *desc = irq_data_to_desc(data);
188         struct irq_chip *chip = irq_data_get_irq_chip(data);
189         int ret;
190
191         if (!chip || !chip->irq_set_affinity)
192                 return -EINVAL;
193
194         ret = chip->irq_set_affinity(data, mask, force);
195         switch (ret) {
196         case IRQ_SET_MASK_OK:
197         case IRQ_SET_MASK_OK_DONE:
198                 cpumask_copy(desc->irq_common_data.affinity, mask);
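                /* fall through - OK and OK_DONE also run the common code below */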
199         case IRQ_SET_MASK_OK_NOCOPY:
200                 irq_validate_effective_affinity(data);
201                 irq_set_thread_affinity(desc);
202                 ret = 0;
203         }
204
205         return ret;
206 }
207
208 #ifdef CONFIG_GENERIC_PENDING_IRQ
209 static inline int irq_set_affinity_pending(struct irq_data *data,
210                                            const struct cpumask *dest)
211 {
212         struct irq_desc *desc = irq_data_to_desc(data);
213
214         irqd_set_move_pending(data);
215         irq_copy_pending(desc, dest);
216         return 0;
217 }
218 #else
219 static inline int irq_set_affinity_pending(struct irq_data *data,
220                                            const struct cpumask *dest)
221 {
222         return -EBUSY;
223 }
224 #endif
225
226 static int irq_try_set_affinity(struct irq_data *data,
227                                 const struct cpumask *dest, bool force)
228 {
229         int ret = irq_do_set_affinity(data, dest, force);
230
231         /*
232          * In case that the underlying vector management is busy and the
233          * architecture supports the generic pending mechanism then utilize
234          * this to avoid returning an error to user space.
235          */
236         if (ret == -EBUSY && !force)
237                 ret = irq_set_affinity_pending(data, dest);
238         return ret;
239 }
240
241 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
242                             bool force)
243 {
244         struct irq_chip *chip = irq_data_get_irq_chip(data);
245         struct irq_desc *desc = irq_data_to_desc(data);
246         int ret = 0;
247
248         if (!chip || !chip->irq_set_affinity)
249                 return -EINVAL;
250
251         if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
252                 ret = irq_try_set_affinity(data, mask, force);
253         } else {
254                 irqd_set_move_pending(data);
255                 irq_copy_pending(desc, mask);
256         }
257
258         if (desc->affinity_notify) {
259                 kref_get(&desc->affinity_notify->kref);
260                 schedule_work(&desc->affinity_notify->work);
261         }
262         irqd_set(data, IRQD_AFFINITY_SET);
263
264         return ret;
265 }
266
267 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
268 {
269         struct irq_desc *desc = irq_to_desc(irq);
270         unsigned long flags;
271         int ret;
272
273         if (!desc)
274                 return -EINVAL;
275
276         raw_spin_lock_irqsave(&desc->lock, flags);
277         ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
278         raw_spin_unlock_irqrestore(&desc->lock, flags);
279         return ret;
280 }
281
282 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
283 {
284         unsigned long flags;
285         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
286
287         if (!desc)
288                 return -EINVAL;
289         desc->affinity_hint = m;
290         irq_put_desc_unlock(desc, flags);
291         /* set the initial affinity to prevent every interrupt being on CPU0 */
292         if (m)
293                 __irq_set_affinity(irq, m, false);
294         return 0;
295 }
296 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
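/*
 * Editor's sketch (hypothetical multiqueue driver, names made up): drivers
 * typically use irq_set_affinity_hint() to suggest one CPU per queue
 * vector so that the initial affinity above, and later irqbalance, spread
 * the load:
 *
 *	for (i = 0; i < dev->nvec; i++)
 *		irq_set_affinity_hint(foo_vec_to_irq(dev, i),
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint should be cleared with irq_set_affinity_hint(irq, NULL) before
 * the interrupt is freed.
 */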
297
298 static void irq_affinity_notify(struct work_struct *work)
299 {
300         struct irq_affinity_notify *notify =
301                 container_of(work, struct irq_affinity_notify, work);
302         struct irq_desc *desc = irq_to_desc(notify->irq);
303         cpumask_var_t cpumask;
304         unsigned long flags;
305
306         if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
307                 goto out;
308
309         raw_spin_lock_irqsave(&desc->lock, flags);
310         if (irq_move_pending(&desc->irq_data))
311                 irq_get_pending(cpumask, desc);
312         else
313                 cpumask_copy(cpumask, desc->irq_common_data.affinity);
314         raw_spin_unlock_irqrestore(&desc->lock, flags);
315
316         notify->notify(notify, cpumask);
317
318         free_cpumask_var(cpumask);
319 out:
320         kref_put(&notify->kref, notify->release);
321 }
322
323 /**
324  *      irq_set_affinity_notifier - control notification of IRQ affinity changes
325  *      @irq:           Interrupt for which to enable/disable notification
326  *      @notify:        Context for notification, or %NULL to disable
327  *                      notification.  Function pointers must be initialised;
328  *                      the other fields will be initialised by this function.
329  *
330  *      Must be called in process context.  Notification may only be enabled
331  *      after the IRQ is allocated and must be disabled before the IRQ is
332  *      freed using free_irq().
333  */
334 int
335 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
336 {
337         struct irq_desc *desc = irq_to_desc(irq);
338         struct irq_affinity_notify *old_notify;
339         unsigned long flags;
340
341         /* The release function is promised process context */
342         might_sleep();
343
344         if (!desc)
345                 return -EINVAL;
346
347         /* Complete initialisation of *notify */
348         if (notify) {
349                 notify->irq = irq;
350                 kref_init(&notify->kref);
351                 INIT_WORK(&notify->work, irq_affinity_notify);
352         }
353
354         raw_spin_lock_irqsave(&desc->lock, flags);
355         old_notify = desc->affinity_notify;
356         desc->affinity_notify = notify;
357         raw_spin_unlock_irqrestore(&desc->lock, flags);
358
359         if (old_notify)
360                 kref_put(&old_notify->kref, old_notify->release);
361
362         return 0;
363 }
364 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
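/*
 * Editor's sketch (hypothetical usage, names made up): a driver embeds a
 * struct irq_affinity_notify, fills in both callbacks and registers it:
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		struct foo_dev *dev = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *		// re-steer queues/DMA to the new CPU set
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		// last reference dropped; nothing dynamic to free here
 *	}
 *
 *	dev->affinity_notify.notify  = foo_notify;
 *	dev->affinity_notify.release = foo_release;
 *	irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
 *
 * Passing NULL unregisters the notifier, which must happen before
 * free_irq() as the kernel-doc above states.
 */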
365
366 #ifndef CONFIG_AUTO_IRQ_AFFINITY
367 /*
368  * Generic version of the affinity autoselector.
369  */
370 int irq_setup_affinity(struct irq_desc *desc)
371 {
372         struct cpumask *set = irq_default_affinity;
373         int ret, node = irq_desc_get_node(desc);
374         static DEFINE_RAW_SPINLOCK(mask_lock);
375         static struct cpumask mask;
376
377         /* Excludes PER_CPU and NO_BALANCE interrupts */
378         if (!__irq_can_set_affinity(desc))
379                 return 0;
380
381         raw_spin_lock(&mask_lock);
382         /*
383          * Preserve the managed affinity setting and a userspace affinity
384          * setup, but make sure that one of the targets is online.
385          */
386         if (irqd_affinity_is_managed(&desc->irq_data) ||
387             irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
388                 if (cpumask_intersects(desc->irq_common_data.affinity,
389                                        cpu_online_mask))
390                         set = desc->irq_common_data.affinity;
391                 else
392                         irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
393         }
394
395         cpumask_and(&mask, cpu_online_mask, set);
396         if (node != NUMA_NO_NODE) {
397                 const struct cpumask *nodemask = cpumask_of_node(node);
398
399                 /* make sure at least one of the cpus in nodemask is online */
400                 if (cpumask_intersects(&mask, nodemask))
401                         cpumask_and(&mask, &mask, nodemask);
402         }
403         ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
404         raw_spin_unlock(&mask_lock);
405         return ret;
406 }
407 #else
408 /* Wrapper for ALPHA specific affinity selector magic */
409 int irq_setup_affinity(struct irq_desc *desc)
410 {
411         return irq_select_affinity(irq_desc_get_irq(desc));
412 }
413 #endif
414
415 /*
416  * Called when a bogus affinity is set via /proc/irq
417  */
418 int irq_select_affinity_usr(unsigned int irq)
419 {
420         struct irq_desc *desc = irq_to_desc(irq);
421         unsigned long flags;
422         int ret;
423
424         raw_spin_lock_irqsave(&desc->lock, flags);
425         ret = irq_setup_affinity(desc);
426         raw_spin_unlock_irqrestore(&desc->lock, flags);
427         return ret;
428 }
429 #endif
430
431 /**
432  *      irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
433  *      @irq: interrupt number to set affinity
434  *      @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
435  *                  specific data for percpu_devid interrupts
436  *
437  *      This function uses the vCPU specific data to set the vCPU
438  *      affinity for an irq. The vCPU specific data is passed from
439  *      outside, such as KVM. One example code path is as below:
440  *      KVM -> IOMMU -> irq_set_vcpu_affinity().
441  */
442 int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
443 {
444         unsigned long flags;
445         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
446         struct irq_data *data;
447         struct irq_chip *chip;
448         int ret = -ENOSYS;
449
450         if (!desc)
451                 return -EINVAL;
452
453         data = irq_desc_get_irq_data(desc);
454         do {
455                 chip = irq_data_get_irq_chip(data);
456                 if (chip && chip->irq_set_vcpu_affinity)
457                         break;
458 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
459                 data = data->parent_data;
460 #else
461                 data = NULL;
462 #endif
463         } while (data);
464
465         if (data)
466                 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
467         irq_put_desc_unlock(desc, flags);
468
469         return ret;
470 }
471 EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
472
473 void __disable_irq(struct irq_desc *desc)
474 {
475         if (!desc->depth++)
476                 irq_disable(desc);
477 }
478
479 static int __disable_irq_nosync(unsigned int irq)
480 {
481         unsigned long flags;
482         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
483
484         if (!desc)
485                 return -EINVAL;
486         __disable_irq(desc);
487         irq_put_desc_busunlock(desc, flags);
488         return 0;
489 }
490
491 /**
492  *      disable_irq_nosync - disable an irq without waiting
493  *      @irq: Interrupt to disable
494  *
495  *      Disable the selected interrupt line.  Disables and Enables are
496  *      nested.
497  *      Unlike disable_irq(), this function does not ensure existing
498  *      instances of the IRQ handler have completed before returning.
499  *
500  *      This function may be called from IRQ context.
501  */
502 void disable_irq_nosync(unsigned int irq)
503 {
504         __disable_irq_nosync(irq);
505 }
506 EXPORT_SYMBOL(disable_irq_nosync);
507
508 /**
509  *      disable_irq - disable an irq and wait for completion
510  *      @irq: Interrupt to disable
511  *
512  *      Disable the selected interrupt line.  Enables and Disables are
513  *      nested.
514  *      This function waits for any pending IRQ handlers for this interrupt
515  *      to complete before returning. If you use this function while
516  *      holding a resource the IRQ handler may need you will deadlock.
517  *
518  *      This function may be called - with care - from IRQ context.
519  */
520 void disable_irq(unsigned int irq)
521 {
522         if (!__disable_irq_nosync(irq))
523                 synchronize_irq(irq);
524 }
525 EXPORT_SYMBOL(disable_irq);
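/*
 * Editor's sketch (hypothetical driver, names made up): because disables
 * and enables nest, the pair can safely bracket a reconfiguration:
 *
 *	disable_irq(dev->irq);		// also waits for running handlers
 *	foo_reprogram_registers(dev);	// handler cannot see half-written state
 *	enable_irq(dev->irq);
 *
 * Note that disable_irq() waits (and may sleep) for threaded handlers;
 * disable_irq_nosync() above is the variant for contexts that cannot wait.
 */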
526
527 /**
528  *      disable_hardirq - disables an irq and waits for hardirq completion
529  *      @irq: Interrupt to disable
530  *
531  *      Disable the selected interrupt line.  Enables and Disables are
532  *      nested.
533  *      This function waits for any pending hard IRQ handlers for this
534  *      interrupt to complete before returning. If you use this function while
535  *      holding a resource the hard IRQ handler may need you will deadlock.
536  *
537  *      When used to optimistically disable an interrupt from atomic context
538  *      the return value must be checked.
539  *
540  *      Returns: false if a threaded handler is active.
541  *
542  *      This function may be called - with care - from IRQ context.
543  */
544 bool disable_hardirq(unsigned int irq)
545 {
546         if (!__disable_irq_nosync(irq))
547                 return synchronize_hardirq(irq);
548
549         return false;
550 }
551 EXPORT_SYMBOL_GPL(disable_hardirq);
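/*
 * Editor's sketch: when disable_hardirq() is used optimistically from
 * atomic context, the return value says whether the threaded part is
 * quiesced as well (dev->irq is illustrative):
 *
 *	if (disable_hardirq(dev->irq)) {
 *		// hard IRQ handlers have finished, no threaded handler runs
 *	} else {
 *		// a threaded handler is still active; defer the work
 *	}
 *
 * Pair it with enable_irq() in either case.
 */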
552
553 void __enable_irq(struct irq_desc *desc)
554 {
555         switch (desc->depth) {
556         case 0:
557  err_out:
558                 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
559                      irq_desc_get_irq(desc));
560                 break;
561         case 1: {
562                 if (desc->istate & IRQS_SUSPENDED)
563                         goto err_out;
564                 /* Prevent probing on this irq: */
565                 irq_settings_set_noprobe(desc);
566                 /*
567                  * Call irq_startup() not irq_enable() here because the
568                  * interrupt might be marked NOAUTOEN. So irq_startup()
569                  * needs to be invoked when it gets enabled the first
570                  * time. If it was already started up, then irq_startup()
571                  * will invoke irq_enable() under the hood.
572                  */
573                 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
574                 break;
575         }
576         default:
577                 desc->depth--;
578         }
579 }
580
581 /**
582  *      enable_irq - enable handling of an irq
583  *      @irq: Interrupt to enable
584  *
585  *      Undoes the effect of one call to disable_irq().  If this
586  *      matches the last disable, processing of interrupts on this
587  *      IRQ line is re-enabled.
588  *
589  *      This function may be called from IRQ context only when
590  *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
591  */
592 void enable_irq(unsigned int irq)
593 {
594         unsigned long flags;
595         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
596
597         if (!desc)
598                 return;
599         if (WARN(!desc->irq_data.chip,
600                  KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
601                 goto out;
602
603         __enable_irq(desc);
604 out:
605         irq_put_desc_busunlock(desc, flags);
606 }
607 EXPORT_SYMBOL(enable_irq);
608
609 static int set_irq_wake_real(unsigned int irq, unsigned int on)
610 {
611         struct irq_desc *desc = irq_to_desc(irq);
612         int ret = -ENXIO;
613
614         if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
615                 return 0;
616
617         if (desc->irq_data.chip->irq_set_wake)
618                 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
619
620         return ret;
621 }
622
623 /**
624  *      irq_set_irq_wake - control irq power management wakeup
625  *      @irq:   interrupt to control
626  *      @on:    enable/disable power management wakeup
627  *
628  *      Enable/disable power management wakeup mode, which is
629  *      disabled by default.  Enables and disables must match,
630  *      just as they match for non-wakeup mode support.
631  *
632  *      Wakeup mode lets this IRQ wake the system from sleep
633  *      states like "suspend to RAM".
634  */
635 int irq_set_irq_wake(unsigned int irq, unsigned int on)
636 {
637         unsigned long flags;
638         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
639         int ret = 0;
640
641         if (!desc)
642                 return -EINVAL;
643
644         /* wakeup-capable irqs can be shared between drivers that
645          * don't need to have the same sleep mode behaviors.
646          */
647         if (on) {
648                 if (desc->wake_depth++ == 0) {
649                         ret = set_irq_wake_real(irq, on);
650                         if (ret)
651                                 desc->wake_depth = 0;
652                         else
653                                 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
654                 }
655         } else {
656                 if (desc->wake_depth == 0) {
657                         WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
658                 } else if (--desc->wake_depth == 0) {
659                         ret = set_irq_wake_real(irq, on);
660                         if (ret)
661                                 desc->wake_depth = 1;
662                         else
663                                 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
664                 }
665         }
666         irq_put_desc_busunlock(desc, flags);
667         return ret;
668 }
669 EXPORT_SYMBOL(irq_set_irq_wake);
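/*
 * Editor's sketch (hypothetical suspend/resume path, names made up): wake
 * enables and disables must be balanced, typically via the
 * enable_irq_wake()/disable_irq_wake() wrappers around this function:
 *
 *	static int foo_suspend(struct device *d)
 *	{
 *		struct foo_dev *dev = dev_get_drvdata(d);
 *
 *		if (device_may_wakeup(d))
 *			enable_irq_wake(dev->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *d)
 *	{
 *		struct foo_dev *dev = dev_get_drvdata(d);
 *
 *		if (device_may_wakeup(d))
 *			disable_irq_wake(dev->irq);
 *		return 0;
 *	}
 */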
670
671 /*
672  * Internal function that tells the architecture code whether a
673  * particular irq has been exclusively allocated or is available
674  * for driver use.
675  */
676 int can_request_irq(unsigned int irq, unsigned long irqflags)
677 {
678         unsigned long flags;
679         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
680         int canrequest = 0;
681
682         if (!desc)
683                 return 0;
684
685         if (irq_settings_can_request(desc)) {
686                 if (!desc->action ||
687                     irqflags & desc->action->flags & IRQF_SHARED)
688                         canrequest = 1;
689         }
690         irq_put_desc_unlock(desc, flags);
691         return canrequest;
692 }
693
694 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
695 {
696         struct irq_chip *chip = desc->irq_data.chip;
697         int ret, unmask = 0;
698
699         if (!chip || !chip->irq_set_type) {
700                 /*
701                  * IRQF_TRIGGER_* but the PIC does not support multiple
702                  * flow-types?
703                  */
704                 pr_debug("No set_type function for IRQ %d (%s)\n",
705                          irq_desc_get_irq(desc),
706                          chip ? (chip->name ? : "unknown") : "unknown");
707                 return 0;
708         }
709
710         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
711                 if (!irqd_irq_masked(&desc->irq_data))
712                         mask_irq(desc);
713                 if (!irqd_irq_disabled(&desc->irq_data))
714                         unmask = 1;
715         }
716
717         /* Mask all flags except trigger mode */
718         flags &= IRQ_TYPE_SENSE_MASK;
719         ret = chip->irq_set_type(&desc->irq_data, flags);
720
721         switch (ret) {
722         case IRQ_SET_MASK_OK:
723         case IRQ_SET_MASK_OK_DONE:
724                 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
725                 irqd_set(&desc->irq_data, flags);
726
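                /* fall through - the settings update below applies to OK and OK_DONE too */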
727         case IRQ_SET_MASK_OK_NOCOPY:
728                 flags = irqd_get_trigger_type(&desc->irq_data);
729                 irq_settings_set_trigger_mask(desc, flags);
730                 irqd_clear(&desc->irq_data, IRQD_LEVEL);
731                 irq_settings_clr_level(desc);
732                 if (flags & IRQ_TYPE_LEVEL_MASK) {
733                         irq_settings_set_level(desc);
734                         irqd_set(&desc->irq_data, IRQD_LEVEL);
735                 }
736
737                 ret = 0;
738                 break;
739         default:
740                 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
741                        flags, irq_desc_get_irq(desc), chip->irq_set_type);
742         }
743         if (unmask)
744                 unmask_irq(desc);
745         return ret;
746 }
747
748 #ifdef CONFIG_HARDIRQS_SW_RESEND
749 int irq_set_parent(int irq, int parent_irq)
750 {
751         unsigned long flags;
752         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
753
754         if (!desc)
755                 return -EINVAL;
756
757         desc->parent_irq = parent_irq;
758
759         irq_put_desc_unlock(desc, flags);
760         return 0;
761 }
762 EXPORT_SYMBOL_GPL(irq_set_parent);
763 #endif
764
765 /*
766  * Default primary interrupt handler for threaded interrupts. Is
767  * assigned as primary handler when request_threaded_irq is called
768  * with handler == NULL. Useful for oneshot interrupts.
769  */
770 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
771 {
772         return IRQ_WAKE_THREAD;
773 }
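/*
 * Editor's sketch: this default handler is what a driver gets as its
 * primary handler after a request like the following (client, foo_thread_fn,
 * "foo" and dev are made up):
 *
 *	ret = request_threaded_irq(client->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *				   "foo", dev);
 *
 * IRQF_ONESHOT is mandatory for such handler == NULL requests unless the
 * irq chip is IRQCHIP_ONESHOT_SAFE; see the check in __setup_irq() below.
 */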
774
775 /*
776  * Primary handler for nested threaded interrupts. Should never be
777  * called.
778  */
779 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
780 {
781         WARN(1, "Primary handler called for nested irq %d\n", irq);
782         return IRQ_NONE;
783 }
784
785 static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
786 {
787         WARN(1, "Secondary action handler called for irq %d\n", irq);
788         return IRQ_NONE;
789 }
790
791 static int irq_wait_for_interrupt(struct irqaction *action)
792 {
793         for (;;) {
794                 set_current_state(TASK_INTERRUPTIBLE);
795
796                 if (kthread_should_stop()) {
797                         /* may need to run one last time */
798                         if (test_and_clear_bit(IRQTF_RUNTHREAD,
799                                                &action->thread_flags)) {
800                                 __set_current_state(TASK_RUNNING);
801                                 return 0;
802                         }
803                         __set_current_state(TASK_RUNNING);
804                         return -1;
805                 }
806
807                 if (test_and_clear_bit(IRQTF_RUNTHREAD,
808                                        &action->thread_flags)) {
809                         __set_current_state(TASK_RUNNING);
810                         return 0;
811                 }
812                 schedule();
813         }
814 }
815
816 /*
817  * Oneshot interrupts keep the irq line masked until the threaded
818  * handler has finished. Unmask if the interrupt has not been disabled and
819  * is marked MASKED.
820  */
821 static void irq_finalize_oneshot(struct irq_desc *desc,
822                                  struct irqaction *action)
823 {
824         if (!(desc->istate & IRQS_ONESHOT) ||
825             action->handler == irq_forced_secondary_handler)
826                 return;
827 again:
828         chip_bus_lock(desc);
829         raw_spin_lock_irq(&desc->lock);
830
831         /*
832          * Implausible though it may be, we need to protect against
833          * the following scenario:
834          *
835          * The threaded handler can finish before the hard interrupt
836          * handler on the other CPU does. If we unmask the irq line then
837          * the interrupt can come in again, mask the line and leave due
838          * to IRQS_INPROGRESS, so the irq line stays masked forever.
839          *
840          * This also serializes the state of shared oneshot handlers
841          * versus "desc->threads_oneshot |= action->thread_mask;" in
842          * irq_wake_thread(). See the comment there which explains the
843          * serialization.
844          */
845         if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
846                 raw_spin_unlock_irq(&desc->lock);
847                 chip_bus_sync_unlock(desc);
848                 cpu_relax();
849                 goto again;
850         }
851
852         /*
853          * Now check again whether the thread should run. Otherwise
854          * we would clear the threads_oneshot bit of this thread which
855          * was just set.
856          */
857         if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
858                 goto out_unlock;
859
860         desc->threads_oneshot &= ~action->thread_mask;
861
862         if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
863             irqd_irq_masked(&desc->irq_data))
864                 unmask_threaded_irq(desc);
865
866 out_unlock:
867         raw_spin_unlock_irq(&desc->lock);
868         chip_bus_sync_unlock(desc);
869 }
870
871 #ifdef CONFIG_SMP
872 /*
873  * Check whether we need to change the affinity of the interrupt thread.
874  */
875 static void
876 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
877 {
878         cpumask_var_t mask;
879         bool valid = true;
880
881         if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
882                 return;
883
884         /*
885          * In case we are out of memory we set IRQTF_AFFINITY again and
886          * try again next time
887          */
888         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
889                 set_bit(IRQTF_AFFINITY, &action->thread_flags);
890                 return;
891         }
892
893         raw_spin_lock_irq(&desc->lock);
894         /*
895          * This code is triggered unconditionally. Check the affinity
896          * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
897          */
898         if (cpumask_available(desc->irq_common_data.affinity)) {
899                 const struct cpumask *m;
900
901                 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
902                 cpumask_copy(mask, m);
903         } else {
904                 valid = false;
905         }
906         raw_spin_unlock_irq(&desc->lock);
907
908         if (valid)
909                 set_cpus_allowed_ptr(current, mask);
910         free_cpumask_var(mask);
911 }
912 #else
913 static inline void
914 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
915 #endif
916
917 /*
918  * Interrupts which are not explicitly requested as threaded
919  * interrupts rely on the implicit bh/preempt disable of the hard irq
920  * context. So we need to disable bh here to avoid deadlocks and other
921  * side effects.
922  */
923 static irqreturn_t
924 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
925 {
926         irqreturn_t ret;
927
928         local_bh_disable();
929         ret = action->thread_fn(action->irq, action->dev_id);
930         irq_finalize_oneshot(desc, action);
931         local_bh_enable();
932         return ret;
933 }
934
935 /*
936  * Interrupts explicitly requested as threaded interrupts want to be
937  * preemptible - many of them need to sleep and wait for slow buses to
938  * complete.
939  */
940 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
941                 struct irqaction *action)
942 {
943         irqreturn_t ret;
944
945         ret = action->thread_fn(action->irq, action->dev_id);
946         irq_finalize_oneshot(desc, action);
947         return ret;
948 }
949
950 static void wake_threads_waitq(struct irq_desc *desc)
951 {
952         if (atomic_dec_and_test(&desc->threads_active))
953                 wake_up(&desc->wait_for_threads);
954 }
955
956 static void irq_thread_dtor(struct callback_head *unused)
957 {
958         struct task_struct *tsk = current;
959         struct irq_desc *desc;
960         struct irqaction *action;
961
962         if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
963                 return;
964
965         action = kthread_data(tsk);
966
967         pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
968                tsk->comm, tsk->pid, action->irq);
969
970
971         desc = irq_to_desc(action->irq);
972         /*
973          * If IRQTF_RUNTHREAD is set, we need to decrement
974          * desc->threads_active and wake possible waiters.
975          */
976         if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
977                 wake_threads_waitq(desc);
978
979         /* Prevent a stale desc->threads_oneshot */
980         irq_finalize_oneshot(desc, action);
981 }
982
983 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
984 {
985         struct irqaction *secondary = action->secondary;
986
987         if (WARN_ON_ONCE(!secondary))
988                 return;
989
990         raw_spin_lock_irq(&desc->lock);
991         __irq_wake_thread(desc, secondary);
992         raw_spin_unlock_irq(&desc->lock);
993 }
994
995 /*
996  * Interrupt handler thread
997  */
998 static int irq_thread(void *data)
999 {
1000         struct callback_head on_exit_work;
1001         struct irqaction *action = data;
1002         struct irq_desc *desc = irq_to_desc(action->irq);
1003         irqreturn_t (*handler_fn)(struct irq_desc *desc,
1004                         struct irqaction *action);
1005
1006         if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1007                                         &action->thread_flags))
1008                 handler_fn = irq_forced_thread_fn;
1009         else
1010                 handler_fn = irq_thread_fn;
1011
1012         init_task_work(&on_exit_work, irq_thread_dtor);
1013         task_work_add(current, &on_exit_work, false);
1014
1015         irq_thread_check_affinity(desc, action);
1016
1017         while (!irq_wait_for_interrupt(action)) {
1018                 irqreturn_t action_ret;
1019
1020                 irq_thread_check_affinity(desc, action);
1021
1022                 action_ret = handler_fn(desc, action);
1023                 if (action_ret == IRQ_HANDLED)
1024                         atomic_inc(&desc->threads_handled);
1025                 if (action_ret == IRQ_WAKE_THREAD)
1026                         irq_wake_secondary(desc, action);
1027
1028                 wake_threads_waitq(desc);
1029         }
1030
1031         /*
1032          * This is the regular exit path. __free_irq() is stopping the
1033          * thread via kthread_stop() after calling
1034          * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1035          * oneshot mask bit can be set.
1036          */
1037         task_work_cancel(current, irq_thread_dtor);
1038         return 0;
1039 }
1040
1041 /**
1042  *      irq_wake_thread - wake the irq thread for the action identified by dev_id
1043  *      @irq:           Interrupt line
1044  *      @dev_id:        Device identity for which the thread should be woken
1045  *
1046  */
1047 void irq_wake_thread(unsigned int irq, void *dev_id)
1048 {
1049         struct irq_desc *desc = irq_to_desc(irq);
1050         struct irqaction *action;
1051         unsigned long flags;
1052
1053         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1054                 return;
1055
1056         raw_spin_lock_irqsave(&desc->lock, flags);
1057         for_each_action_of_desc(desc, action) {
1058                 if (action->dev_id == dev_id) {
1059                         if (action->thread)
1060                                 __irq_wake_thread(desc, action);
1061                         break;
1062                 }
1063         }
1064         raw_spin_unlock_irqrestore(&desc->lock, flags);
1065 }
1066 EXPORT_SYMBOL_GPL(irq_wake_thread);
1067
1068 static int irq_setup_forced_threading(struct irqaction *new)
1069 {
1070         if (!force_irqthreads)
1071                 return 0;
1072         if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1073                 return 0;
1074
1075         /*
1076          * No further action required for interrupts which are requested as
1077          * threaded interrupts already
1078          */
1079         if (new->handler == irq_default_primary_handler)
1080                 return 0;
1081
1082         new->flags |= IRQF_ONESHOT;
1083
1084         /*
1085          * Handle the case where we have a real primary handler and a
1086          * thread handler. We force that combination to be threaded as
1087          * well by creating a secondary action.
1088          */
1089         if (new->handler && new->thread_fn) {
1090                 /* Allocate the secondary action */
1091                 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1092                 if (!new->secondary)
1093                         return -ENOMEM;
1094                 new->secondary->handler = irq_forced_secondary_handler;
1095                 new->secondary->thread_fn = new->thread_fn;
1096                 new->secondary->dev_id = new->dev_id;
1097                 new->secondary->irq = new->irq;
1098                 new->secondary->name = new->name;
1099         }
1100         /* Deal with the primary handler */
1101         set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1102         new->thread_fn = new->handler;
1103         new->handler = irq_default_primary_handler;
1104         return 0;
1105 }
1106
1107 static int irq_request_resources(struct irq_desc *desc)
1108 {
1109         struct irq_data *d = &desc->irq_data;
1110         struct irq_chip *c = d->chip;
1111
1112         return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1113 }
1114
1115 static void irq_release_resources(struct irq_desc *desc)
1116 {
1117         struct irq_data *d = &desc->irq_data;
1118         struct irq_chip *c = d->chip;
1119
1120         if (c->irq_release_resources)
1121                 c->irq_release_resources(d);
1122 }
1123
1124 static int
1125 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1126 {
1127         struct task_struct *t;
1128         struct sched_param param = {
1129                 .sched_priority = MAX_USER_RT_PRIO/2,
1130         };
1131
1132         if (!secondary) {
1133                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1134                                    new->name);
1135         } else {
1136                 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1137                                    new->name);
1138                 param.sched_priority -= 1;
1139         }
1140
1141         if (IS_ERR(t))
1142                 return PTR_ERR(t);
1143
1144         sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1145
1146          * We keep the reference to the task struct even if the
1147          * thread dies, so that the interrupt code never ends up
1148          * referencing an already freed task_struct.
1149          * references an already freed task_struct.
1150          */
1151         get_task_struct(t);
1152         new->thread = t;
1153         /*
1154          * Tell the thread to set its affinity. This is
1155          * important for shared interrupt handlers as we do
1156          * not invoke setup_affinity() for the secondary
1157          * handlers as everything is already set up. Even for
1158          * interrupts marked with IRQF_NOBALANCING this is
1159          * correct as we want the thread to move to the cpu(s)
1160          * on which the requesting code placed the interrupt.
1161          */
1162         set_bit(IRQTF_AFFINITY, &new->thread_flags);
1163         return 0;
1164 }
1165
1166 /*
1167  * Internal function to register an irqaction - typically used to
1168  * allocate special interrupts that are part of the architecture.
1169  *
1170  * Locking rules:
1171  *
1172  * desc->request_mutex  Provides serialization against a concurrent free_irq()
1173  *   chip_bus_lock      Provides serialization for slow bus operations
1174  *     desc->lock       Provides serialization against hard interrupts
1175  *
1176  * chip_bus_lock and desc->lock are sufficient for all other management and
1177  * interrupt related functions. desc->request_mutex solely serializes
1178  * request/free_irq().
1179  */
1180 static int
1181 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1182 {
1183         struct irqaction *old, **old_ptr;
1184         unsigned long flags, thread_mask = 0;
1185         int ret, nested, shared = 0;
1186
1187         if (!desc)
1188                 return -EINVAL;
1189
1190         if (desc->irq_data.chip == &no_irq_chip)
1191                 return -ENOSYS;
1192         if (!try_module_get(desc->owner))
1193                 return -ENODEV;
1194
1195         new->irq = irq;
1196
1197         /*
1198          * If the trigger type is not specified by the caller,
1199          * then use the default for this interrupt.
1200          */
1201         if (!(new->flags & IRQF_TRIGGER_MASK))
1202                 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1203
1204         /*
1205          * Check whether the interrupt nests into another interrupt
1206          * thread.
1207          */
1208         nested = irq_settings_is_nested_thread(desc);
1209         if (nested) {
1210                 if (!new->thread_fn) {
1211                         ret = -EINVAL;
1212                         goto out_mput;
1213                 }
1214                 /*
1215                  * Replace the primary handler which was provided from
1216                  * the driver for non nested interrupt handling by the
1217                  * dummy function which warns when called.
1218                  */
1219                 new->handler = irq_nested_primary_handler;
1220         } else {
1221                 if (irq_settings_can_thread(desc)) {
1222                         ret = irq_setup_forced_threading(new);
1223                         if (ret)
1224                                 goto out_mput;
1225                 }
1226         }
1227
1228         /*
1229          * Create a handler thread when a thread function is supplied
1230          * and the interrupt does not nest into another interrupt
1231          * thread.
1232          */
1233         if (new->thread_fn && !nested) {
1234                 ret = setup_irq_thread(new, irq, false);
1235                 if (ret)
1236                         goto out_mput;
1237                 if (new->secondary) {
1238                         ret = setup_irq_thread(new->secondary, irq, true);
1239                         if (ret)
1240                                 goto out_thread;
1241                 }
1242         }
1243
1244         /*
1245          * Drivers are often written to work w/o knowledge about the
1246          * underlying irq chip implementation, so a request for a
1247          * threaded irq without a primary hard irq context handler
1248          * requires the ONESHOT flag to be set. Some irq chips like
1249          * MSI based interrupts are per se one shot safe. Check the
1250          * chip flags, so we can avoid the unmask dance at the end of
1251          * the threaded handler for those.
1252          */
1253         if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1254                 new->flags &= ~IRQF_ONESHOT;
1255
1256         /*
1257          * Protects against a concurrent __free_irq() call which might wait
1258          * for synchronize_hardirq() to complete without holding the optional
1259          * chip bus lock and desc->lock. Also protects against handing out
1260          * a recycled oneshot thread_mask bit while it's still in use by
1261          * its previous owner.
1262          */
1263         mutex_lock(&desc->request_mutex);
1264
1265         /*
1266          * Acquire bus lock as the irq_request_resources() callback below
1267          * might rely on the serialization or the magic power management
1268          * functions which are abusing the irq_bus_lock() callback,
1269          */
1270         chip_bus_lock(desc);
1271
1272         /* First installed action requests resources. */
1273         if (!desc->action) {
1274                 ret = irq_request_resources(desc);
1275                 if (ret) {
1276                         pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1277                                new->name, irq, desc->irq_data.chip->name);
1278                         goto out_bus_unlock;
1279                 }
1280         }
1281
1282         /*
1283          * The following block of code has to be executed atomically
1284          * protected against a concurrent interrupt and any of the other
1285          * management calls which are not serialized via
1286          * desc->request_mutex or the optional bus lock.
1287          */
1288         raw_spin_lock_irqsave(&desc->lock, flags);
1289         old_ptr = &desc->action;
1290         old = *old_ptr;
1291         if (old) {
1292                 /*
1293                  * Can't share interrupts unless both agree to and are
1294                  * the same type (level, edge, polarity). So both flag
1295                  * fields must have IRQF_SHARED set and the bits which
1296                  * set the trigger type must match. Also all must
1297                  * agree on ONESHOT.
1298                  */
1299                 unsigned int oldtype;
1300
1301                 /*
1302                  * If nobody did set the configuration before, inherit
1303                  * the one provided by the requester.
1304                  */
1305                 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1306                         oldtype = irqd_get_trigger_type(&desc->irq_data);
1307                 } else {
1308                         oldtype = new->flags & IRQF_TRIGGER_MASK;
1309                         irqd_set_trigger_type(&desc->irq_data, oldtype);
1310                 }
1311
1312                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1313                     (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1314                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
1315                         goto mismatch;
1316
1317                 /* All handlers must agree on per-cpuness */
1318                 if ((old->flags & IRQF_PERCPU) !=
1319                     (new->flags & IRQF_PERCPU))
1320                         goto mismatch;
1321
1322                 /* add new interrupt at end of irq queue */
1323                 do {
1324                         /*
1325                          * Or all existing action->thread_mask bits,
1326                          * so we can find the next zero bit for this
1327                          * new action.
1328                          */
1329                         thread_mask |= old->thread_mask;
1330                         old_ptr = &old->next;
1331                         old = *old_ptr;
1332                 } while (old);
1333                 shared = 1;
1334         }
1335
1336         /*
1337          * Setup the thread mask for this irqaction for ONESHOT. For
1338          * !ONESHOT irqs the thread mask is 0 so we can avoid a
1339          * conditional in irq_wake_thread().
1340          */
1341         if (new->flags & IRQF_ONESHOT) {
1342                 /*
1343                  * Unlikely to have 32 (or 64) irqs sharing one line,
1344                  * but who knows.
1345                  */
1346                 if (thread_mask == ~0UL) {
1347                         ret = -EBUSY;
1348                         goto out_unlock;
1349                 }
1350                 /*
1351                  * The thread_mask for the action is or'ed to
1352                  * desc->threads_oneshot to indicate that the
1353                  * IRQF_ONESHOT thread handler has been woken, but not
1354                  * yet finished. The bit is cleared when a thread
1355                  * completes. When all threads of a shared interrupt
1356                  * line have completed desc->threads_active becomes
1357                  * zero and the interrupt line is unmasked. See
1358                  * handle.c:irq_wake_thread() for further information.
1359                  *
1360                  * If no thread is woken by primary (hard irq context)
1361                  * interrupt handlers, then desc->threads_active is
1362                  * also checked for zero to unmask the irq line in the
1363                  * affected hard irq flow handlers
1364                  * (handle_[fasteoi|level]_irq).
1365                  *
1366                  * The new action gets the first zero bit of
1367                  * thread_mask assigned. See the loop above which or's
1368                  * all existing action->thread_mask bits.
1369                  */
1370                 new->thread_mask = 1UL << ffz(thread_mask);
1371
1372         } else if (new->handler == irq_default_primary_handler &&
1373                    !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1374                 /*
1375                  * The interrupt was requested with handler = NULL, so
1376                  * we use the default primary handler for it. But it
1377                  * does not have the oneshot flag set. In combination
1378                  * with level interrupts this is deadly, because the
1379                  * default primary handler just wakes the thread, then
1380                  * the irq line is re-enabled, but the device still
1381                  * has the level irq asserted. Rinse and repeat....
1382                  *
1383                  * While this works for edge type interrupts, we play
1384                  * it safe and reject unconditionally because we can't
1385                  * say for sure which type this interrupt really
1386                  * has. The type flags are unreliable as the
1387                  * underlying chip implementation can override them.
1388                  */
1389                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1390                        irq);
1391                 ret = -EINVAL;
1392                 goto out_unlock;
1393         }
1394
1395         if (!shared) {
1396                 init_waitqueue_head(&desc->wait_for_threads);
1397
1398                 /* Set up the type (level, edge, polarity) if configured: */
1399                 if (new->flags & IRQF_TRIGGER_MASK) {
1400                         ret = __irq_set_trigger(desc,
1401                                                 new->flags & IRQF_TRIGGER_MASK);
1402
1403                         if (ret)
1404                                 goto out_unlock;
1405                 }
1406
1407                 /*
1408                  * Activate the interrupt. That activation must happen
1409                  * independently of IRQ_NOAUTOEN. request_irq() can fail
1410                  * and the callers are supposed to handle
1411                  * that. enable_irq() of an interrupt requested with
1412                  * IRQ_NOAUTOEN is not supposed to fail. The activation
1413                  * keeps it in shutdown mode, it merely associates
1414                  * resources if necessary and if that's not possible it
1415                  * fails. Interrupts which are in managed shutdown mode
1416                  * will simply ignore that activation request.
1417                  */
1418                 ret = irq_activate(desc);
1419                 if (ret)
1420                         goto out_unlock;
1421
1422                 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1423                                   IRQS_ONESHOT | IRQS_WAITING);
1424                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1425
1426                 if (new->flags & IRQF_PERCPU) {
1427                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1428                         irq_settings_set_per_cpu(desc);
1429                 }
1430
1431                 if (new->flags & IRQF_ONESHOT)
1432                         desc->istate |= IRQS_ONESHOT;
1433
1434                 /* Exclude IRQ from balancing if requested */
1435                 if (new->flags & IRQF_NOBALANCING) {
1436                         irq_settings_set_no_balancing(desc);
1437                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1438                 }
1439
1440                 if (irq_settings_can_autoenable(desc)) {
1441                         irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1442                 } else {
1443                         /*
1444                          * Shared interrupts do not go well with disabling
1445                          * auto enable. The sharing interrupt might request
1446                          * it while it's still disabled and then wait for
1447                          * interrupts forever.
1448                          */
1449                         WARN_ON_ONCE(new->flags & IRQF_SHARED);
1450                         /* Undo nested disables: */
1451                         desc->depth = 1;
1452                 }
1453
1454         } else if (new->flags & IRQF_TRIGGER_MASK) {
1455                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1456                 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1457
1458                 if (nmsk != omsk)
1459                         /* hope the handler works with the current trigger mode */
1460                         pr_warn("irq %d uses trigger mode %u; requested %u\n",
1461                                 irq, omsk, nmsk);
1462         }
1463
1464         *old_ptr = new;
1465
1466         irq_pm_install_action(desc, new);
1467
1468         /* Reset broken irq detection when installing new handler */
1469         desc->irq_count = 0;
1470         desc->irqs_unhandled = 0;
1471
1472         /*
1473          * Check whether we disabled the irq via the spurious handler
1474          * before. Reenable it and give it another chance.
1475          */
1476         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1477                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1478                 __enable_irq(desc);
1479         }
1480
1481         raw_spin_unlock_irqrestore(&desc->lock, flags);
1482         chip_bus_sync_unlock(desc);
1483         mutex_unlock(&desc->request_mutex);
1484
1485         irq_setup_timings(desc, new);
1486
1487         /*
1488          * Strictly no need to wake it up, but hung_task complains
1489          * when no hard interrupt wakes the thread up.
1490          */
1491         if (new->thread)
1492                 wake_up_process(new->thread);
1493         if (new->secondary)
1494                 wake_up_process(new->secondary->thread);
1495
1496         register_irq_proc(irq, desc);
1497         new->dir = NULL;
1498         register_handler_proc(irq, new);
1499         return 0;
1500
1501 mismatch:
1502         if (!(new->flags & IRQF_PROBE_SHARED)) {
1503                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1504                        irq, new->flags, new->name, old->flags, old->name);
1505 #ifdef CONFIG_DEBUG_SHIRQ
1506                 dump_stack();
1507 #endif
1508         }
1509         ret = -EBUSY;
1510
1511 out_unlock:
1512         raw_spin_unlock_irqrestore(&desc->lock, flags);
1513
1514         if (!desc->action)
1515                 irq_release_resources(desc);
1516 out_bus_unlock:
1517         chip_bus_sync_unlock(desc);
1518         mutex_unlock(&desc->request_mutex);
1519
1520 out_thread:
1521         if (new->thread) {
1522                 struct task_struct *t = new->thread;
1523
1524                 new->thread = NULL;
1525                 kthread_stop(t);
1526                 put_task_struct(t);
1527         }
1528         if (new->secondary && new->secondary->thread) {
1529                 struct task_struct *t = new->secondary->thread;
1530
1531                 new->secondary->thread = NULL;
1532                 kthread_stop(t);
1533                 put_task_struct(t);
1534         }
1535 out_mput:
1536         module_put(desc->owner);
1537         return ret;
1538 }
1539
1540 /**
1541  *      setup_irq - setup an interrupt
1542  *      @irq: Interrupt line to setup
1543  *      @act: irqaction for the interrupt
1544  *
1545  * Used to statically set up interrupts in the early boot process.
1546  */
1547 int setup_irq(unsigned int irq, struct irqaction *act)
1548 {
1549         int retval;
1550         struct irq_desc *desc = irq_to_desc(irq);
1551
1552         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1553                 return -EINVAL;
1554
1555         retval = irq_chip_pm_get(&desc->irq_data);
1556         if (retval < 0)
1557                 return retval;
1558
1559         retval = __setup_irq(irq, desc, act);
1560
1561         if (retval)
1562                 irq_chip_pm_put(&desc->irq_data);
1563
1564         return retval;
1565 }
1566 EXPORT_SYMBOL_GPL(setup_irq);
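/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): architecture or board code traditionally passes a statically
 * allocated irqaction to setup_irq() before the allocators needed by
 * request_irq() are available. MY_TIMER_IRQ, my_timer_interrupt() and
 * my_board_time_init() are hypothetical names; remove_irq() with the
 * same irqaction would undo this.
 */
static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
        /* Acknowledge the timer in hardware here, then report it handled. */
        return IRQ_HANDLED;
}

static struct irqaction my_timer_irqaction = {
        .handler        = my_timer_interrupt,
        .flags          = IRQF_TIMER,
        .name           = "my-board-timer",
};

static void __init my_board_time_init(void)
{
        /* Fails only on programming errors; the timer line is not shared. */
        if (setup_irq(MY_TIMER_IRQ, &my_timer_irqaction))
                pr_err("failed to set up the board timer interrupt\n");
}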
1567
1568 /*
1569  * Internal function to unregister an irqaction - used to free
1570  * regular and special interrupts that are part of the architecture.
1571  */
1572 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1573 {
1574         unsigned irq = desc->irq_data.irq;
1575         struct irqaction *action, **action_ptr;
1576         unsigned long flags;
1577
1578         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1579
1580         mutex_lock(&desc->request_mutex);
1581         chip_bus_lock(desc);
1582         raw_spin_lock_irqsave(&desc->lock, flags);
1583
1584         /*
1585          * There can be multiple actions per IRQ descriptor, find the right
1586          * one based on the dev_id:
1587          */
1588         action_ptr = &desc->action;
1589         for (;;) {
1590                 action = *action_ptr;
1591
1592                 if (!action) {
1593                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
1594                         raw_spin_unlock_irqrestore(&desc->lock, flags);
1595                         chip_bus_sync_unlock(desc);
1596                         mutex_unlock(&desc->request_mutex);
1597                         return NULL;
1598                 }
1599
1600                 if (action->dev_id == dev_id)
1601                         break;
1602                 action_ptr = &action->next;
1603         }
1604
1605         /* Found it - now remove it from the list of entries: */
1606         *action_ptr = action->next;
1607
1608         irq_pm_remove_action(desc, action);
1609
1610         /* If this was the last handler, shut down the IRQ line: */
1611         if (!desc->action) {
1612                 irq_settings_clr_disable_unlazy(desc);
1613                 irq_shutdown(desc);
1614         }
1615
1616 #ifdef CONFIG_SMP
1617         /* make sure affinity_hint is cleaned up */
1618         if (WARN_ON_ONCE(desc->affinity_hint))
1619                 desc->affinity_hint = NULL;
1620 #endif
1621
1622         raw_spin_unlock_irqrestore(&desc->lock, flags);
1623         /*
1624          * Drop bus_lock here so the changes which were done in the chip
1625          * callbacks above are synced out to the irq chips which hang
1626          * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1627          *
1628          * Aside of that the bus_lock can also be taken from the threaded
1629          * handler in irq_finalize_oneshot() which results in a deadlock
1630          * because kthread_stop() would wait forever for the thread to
1631          * complete, which is blocked on the bus lock.
1632          *
1633          * The still held desc->request_mutex protects against a
1634          * concurrent request_irq() of this irq so the release of resources
1635          * and timing data is properly serialized.
1636          */
1637         chip_bus_sync_unlock(desc);
1638
1639         unregister_handler_proc(irq, action);
1640
1641         /* Make sure it's not being used on another CPU: */
1642         synchronize_hardirq(irq);
1643
1644 #ifdef CONFIG_DEBUG_SHIRQ
1645         /*
1646          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1647          * event to happen even while it's being freed, so let's make sure that
1648          * is so by doing an extra call to the handler ....
1649          *
1650          * ( We do this after actually deregistering it, to make sure that a
1651          *   'real' IRQ doesn't run in parallel with our fake. )
1652          */
1653         if (action->flags & IRQF_SHARED) {
1654                 local_irq_save(flags);
1655                 action->handler(irq, dev_id);
1656                 local_irq_restore(flags);
1657         }
1658 #endif
1659
1660         /*
1661          * The action has already been removed above, but the thread writes
1662          * its oneshot mask bit when it completes. However, request_mutex is
1663          * held across this, which prevents __setup_irq() from handing out
1664          * the same bit to a newly requested action.
1665          */
1666         if (action->thread) {
1667                 kthread_stop(action->thread);
1668                 put_task_struct(action->thread);
1669                 if (action->secondary && action->secondary->thread) {
1670                         kthread_stop(action->secondary->thread);
1671                         put_task_struct(action->secondary->thread);
1672                 }
1673         }
1674
1675         /* Last action releases resources */
1676         if (!desc->action) {
1677                 /*
1678                  * Reacquire bus lock as irq_release_resources() might
1679                  * require it to deallocate resources over the slow bus.
1680                  */
1681                 chip_bus_lock(desc);
1682                 irq_release_resources(desc);
1683                 chip_bus_sync_unlock(desc);
1684                 irq_remove_timings(desc);
1685         }
1686
1687         mutex_unlock(&desc->request_mutex);
1688
1689         irq_chip_pm_put(&desc->irq_data);
1690         module_put(desc->owner);
1691         kfree(action->secondary);
1692         return action;
1693 }
1694
1695 /**
1696  *      remove_irq - free an interrupt
1697  *      @irq: Interrupt line to free
1698  *      @act: irqaction for the interrupt
1699  *
1700  * Used to remove interrupts statically set up by the early boot process.
1701  */
1702 void remove_irq(unsigned int irq, struct irqaction *act)
1703 {
1704         struct irq_desc *desc = irq_to_desc(irq);
1705
1706         if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1707                 __free_irq(desc, act->dev_id);
1708 }
1709 EXPORT_SYMBOL_GPL(remove_irq);
1710
1711 /**
1712  *      free_irq - free an interrupt allocated with request_irq
1713  *      @irq: Interrupt line to free
1714  *      @dev_id: Device identity to free
1715  *
1716  *      Remove an interrupt handler. The handler is removed and if the
1717  *      interrupt line is no longer in use by any driver it is disabled.
1718  *      On a shared IRQ the caller must ensure the interrupt is disabled
1719  *      on the card it drives before calling this function. The function
1720  *      does not return until any executing interrupts for this IRQ
1721  *      have completed.
1722  *
1723  *      This function must not be called from interrupt context.
1724  *
1725  *      Returns the devname argument passed to request_irq.
1726  */
1727 const void *free_irq(unsigned int irq, void *dev_id)
1728 {
1729         struct irq_desc *desc = irq_to_desc(irq);
1730         struct irqaction *action;
1731         const char *devname;
1732
1733         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1734                 return NULL;
1735
1736 #ifdef CONFIG_SMP
1737         if (WARN_ON(desc->affinity_notify))
1738                 desc->affinity_notify = NULL;
1739 #endif
1740
1741         action = __free_irq(desc, dev_id);
1742
1743         if (!action)
1744                 return NULL;
1745
1746         devname = action->name;
1747         kfree(action);
1748         return devname;
1749 }
1750 EXPORT_SYMBOL(free_irq);
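/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): free_irq() takes the same dev_id cookie that was handed to
 * request_irq(), which is how the right action is picked on a shared
 * line. "struct my_dev", my_dev_quiesce() and md->irq are hypothetical.
 */
static void my_dev_teardown_irq(struct my_dev *md)
{
        /* Stop the device from raising interrupts on a shared line first. */
        my_dev_quiesce(md);

        /* Blocks until all in-flight hard and threaded handlers are done. */
        free_irq(md->irq, md);
}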
1751
1752 /**
1753  *      request_threaded_irq - allocate an interrupt line
1754  *      @irq: Interrupt line to allocate
1755  *      @handler: Function to be called when the IRQ occurs.
1756  *                Primary handler for threaded interrupts
1757  *                If NULL and thread_fn != NULL the default
1758  *                primary handler is installed
1759  *      @thread_fn: Function called from the irq handler thread
1760  *                  If NULL, no irq thread is created
1761  *      @irqflags: Interrupt type flags
1762  *      @devname: An ascii name for the claiming device
1763  *      @dev_id: A cookie passed back to the handler function
1764  *
1765  *      This call allocates interrupt resources and enables the
1766  *      interrupt line and IRQ handling. From the point this
1767  *      call is made your handler function may be invoked. Since
1768  *      your handler function must clear any interrupt the board
1769  *      raises, you must take care both to initialise your hardware
1770  *      and to set up the interrupt handler in the right order.
1771  *
1772  *      If you want to set up a threaded irq handler for your device
1773  *      then you need to supply @handler and @thread_fn. @handler is
1774  *      still called in hard interrupt context and has to check
1775  *      whether the interrupt originates from the device. If yes it
1776  *      needs to disable the interrupt on the device and return
1777  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1778  *      @thread_fn. This split handler design is necessary to support
1779  *      shared interrupts.
1780  *
1781  *      Dev_id must be globally unique. Normally the address of the
1782  *      device data structure is used as the cookie. Since the handler
1783  *      receives this value it makes sense to use it.
1784  *
1785  *      If your interrupt is shared you must pass a non NULL dev_id
1786  *      as this is required when freeing the interrupt.
1787  *
1788  *      Flags:
1789  *
1790  *      IRQF_SHARED             Interrupt is shared
1791  *      IRQF_TRIGGER_*          Specify active edge(s) or level
1792  *
1793  */
1794 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1795                          irq_handler_t thread_fn, unsigned long irqflags,
1796                          const char *devname, void *dev_id)
1797 {
1798         struct irqaction *action;
1799         struct irq_desc *desc;
1800         int retval;
1801
1802         if (irq == IRQ_NOTCONNECTED)
1803                 return -ENOTCONN;
1804
1805         /*
1806          * Sanity-check: shared interrupts must pass in a real dev-ID,
1807          * otherwise we'll have trouble later trying to figure out
1808          * which interrupt is which (messes up the interrupt freeing
1809          * logic etc).
1810          *
1811          * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1812          * it cannot be set along with IRQF_NO_SUSPEND.
1813          */
1814         if (((irqflags & IRQF_SHARED) && !dev_id) ||
1815             (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1816             ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1817                 return -EINVAL;
1818
1819         desc = irq_to_desc(irq);
1820         if (!desc)
1821                 return -EINVAL;
1822
1823         if (!irq_settings_can_request(desc) ||
1824             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1825                 return -EINVAL;
1826
1827         if (!handler) {
1828                 if (!thread_fn)
1829                         return -EINVAL;
1830                 handler = irq_default_primary_handler;
1831         }
1832
1833         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1834         if (!action)
1835                 return -ENOMEM;
1836
1837         action->handler = handler;
1838         action->thread_fn = thread_fn;
1839         action->flags = irqflags;
1840         action->name = devname;
1841         action->dev_id = dev_id;
1842
1843         retval = irq_chip_pm_get(&desc->irq_data);
1844         if (retval < 0) {
1845                 kfree(action);
1846                 return retval;
1847         }
1848
1849         retval = __setup_irq(irq, desc, action);
1850
1851         if (retval) {
1852                 irq_chip_pm_put(&desc->irq_data);
1853                 kfree(action->secondary);
1854                 kfree(action);
1855         }
1856
1857 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1858         if (!retval && (irqflags & IRQF_SHARED)) {
1859                 /*
1860                  * It's a shared IRQ -- the driver ought to be prepared for it
1861                  * to happen immediately, so let's make sure....
1862                  * We disable the irq to make sure that a 'real' IRQ doesn't
1863                  * run in parallel with our fake.
1864                  */
1865                 unsigned long flags;
1866
1867                 disable_irq(irq);
1868                 local_irq_save(flags);
1869
1870                 handler(irq, dev_id);
1871
1872                 local_irq_restore(flags);
1873                 enable_irq(irq);
1874         }
1875 #endif
1876         return retval;
1877 }
1878 EXPORT_SYMBOL(request_threaded_irq);
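/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): the split primary/threaded handler pattern described above.
 * The primary handler only checks whether the device raised the
 * interrupt and silences it; the threaded handler does the possibly
 * sleeping work. "struct my_dev" and its register offsets are
 * hypothetical.
 */
struct my_dev {                                         /* hypothetical driver state */
        void __iomem *regs;
        int irq;
};

static irqreturn_t my_dev_hardirq(int irq, void *dev_id)
{
        struct my_dev *md = dev_id;

        if (!(readl(md->regs + MY_DEV_STATUS) & MY_DEV_IRQ_PENDING))
                return IRQ_NONE;                        /* Not ours (shared line) */

        writel(0, md->regs + MY_DEV_IRQ_ENABLE);        /* Silence the device */
        return IRQ_WAKE_THREAD;                         /* Run my_dev_thread() */
}

static irqreturn_t my_dev_thread(int irq, void *dev_id)
{
        struct my_dev *md = dev_id;

        my_dev_handle_events(md);                       /* May sleep here */
        writel(1, md->regs + MY_DEV_IRQ_ENABLE);        /* Re-arm the device */
        return IRQ_HANDLED;
}

static int my_dev_request_irq(struct my_dev *md)
{
        return request_threaded_irq(md->irq, my_dev_hardirq, my_dev_thread,
                                    IRQF_SHARED, "my-dev", md);
}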
1879
1880 /**
1881  *      request_any_context_irq - allocate an interrupt line
1882  *      @irq: Interrupt line to allocate
1883  *      @handler: Function to be called when the IRQ occurs.
1884  *                Threaded handler for threaded interrupts.
1885  *      @flags: Interrupt type flags
1886  *      @name: An ascii name for the claiming device
1887  *      @dev_id: A cookie passed back to the handler function
1888  *
1889  *      This call allocates interrupt resources and enables the
1890  *      interrupt line and IRQ handling. It selects either a
1891  *      hardirq or threaded handling method depending on the
1892  *      context.
1893  *
1894  *      On failure, it returns a negative value. On success,
1895  *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1896  */
1897 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1898                             unsigned long flags, const char *name, void *dev_id)
1899 {
1900         struct irq_desc *desc;
1901         int ret;
1902
1903         if (irq == IRQ_NOTCONNECTED)
1904                 return -ENOTCONN;
1905
1906         desc = irq_to_desc(irq);
1907         if (!desc)
1908                 return -EINVAL;
1909
1910         if (irq_settings_is_nested_thread(desc)) {
1911                 ret = request_threaded_irq(irq, NULL, handler,
1912                                            flags, name, dev_id);
1913                 return !ret ? IRQC_IS_NESTED : ret;
1914         }
1915
1916         ret = request_irq(irq, handler, flags, name, dev_id);
1917         return !ret ? IRQC_IS_HARDIRQ : ret;
1918 }
1919 EXPORT_SYMBOL_GPL(request_any_context_irq);
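/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): request_any_context_irq() for a driver whose interrupt may be
 * demultiplexed by a slow-bus (nested threaded) controller or by a
 * regular hardirq chip. A positive return value only reports which
 * context the handler will run in. my_dev_isr() and the irq_is_nested
 * field are hypothetical.
 */
static int my_dev_setup_irq(struct my_dev *md)
{
        int ret;

        ret = request_any_context_irq(md->irq, my_dev_isr,
                                      IRQF_TRIGGER_RISING, "my-dev", md);
        if (ret < 0)
                return ret;

        md->irq_is_nested = (ret == IRQC_IS_NESTED);
        return 0;
}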
1920
1921 void enable_percpu_irq(unsigned int irq, unsigned int type)
1922 {
1923         unsigned int cpu = smp_processor_id();
1924         unsigned long flags;
1925         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1926
1927         if (!desc)
1928                 return;
1929
1930         /*
1931          * If the trigger type is not specified by the caller, then
1932          * use the default for this interrupt.
1933          */
1934         type &= IRQ_TYPE_SENSE_MASK;
1935         if (type == IRQ_TYPE_NONE)
1936                 type = irqd_get_trigger_type(&desc->irq_data);
1937
1938         if (type != IRQ_TYPE_NONE) {
1939                 int ret;
1940
1941                 ret = __irq_set_trigger(desc, type);
1942
1943                 if (ret) {
1944                         WARN(1, "failed to set type for IRQ%d\n", irq);
1945                         goto out;
1946                 }
1947         }
1948
1949         irq_percpu_enable(desc, cpu);
1950 out:
1951         irq_put_desc_unlock(desc, flags);
1952 }
1953 EXPORT_SYMBOL_GPL(enable_percpu_irq);
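/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): enable_percpu_irq() only affects the CPU it runs on, so per-CPU
 * users typically call it from a CPU online (hotplug) callback on every
 * CPU that should receive the interrupt. my_percpu_irq is hypothetical.
 */
static unsigned int my_percpu_irq;              /* hypothetical, set at probe time */

static int my_driver_cpu_online(unsigned int cpu)
{
        /* Runs on the CPU that is coming online. */
        enable_percpu_irq(my_percpu_irq, IRQ_TYPE_NONE);
        return 0;
}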
1954
1955 /**
1956  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1957  * @irq:        Linux irq number to check for
1958  *
1959  * Must be called from a non-migratable context. Returns the enable
1960  * state of a per cpu interrupt on the current cpu.
1961  */
1962 bool irq_percpu_is_enabled(unsigned int irq)
1963 {
1964         unsigned int cpu = smp_processor_id();
1965         struct irq_desc *desc;
1966         unsigned long flags;
1967         bool is_enabled;
1968
1969         desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1970         if (!desc)
1971                 return false;
1972
1973         is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1974         irq_put_desc_unlock(desc, flags);
1975
1976         return is_enabled;
1977 }
1978 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
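/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): a CPU offline (hotplug) callback that only disables the per-CPU
 * interrupt if it was actually enabled on this CPU. my_percpu_irq is the
 * same hypothetical variable as in the sketch above.
 */
static int my_driver_cpu_offline(unsigned int cpu)
{
        if (irq_percpu_is_enabled(my_percpu_irq))
                disable_percpu_irq(my_percpu_irq);
        return 0;
}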
1979
1980 void disable_percpu_irq(unsigned int irq)
1981 {
1982         unsigned int cpu = smp_processor_id();
1983         unsigned long flags;
1984         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1985
1986         if (!desc)
1987                 return;
1988
1989         irq_percpu_disable(desc, cpu);
1990         irq_put_desc_unlock(desc, flags);
1991 }
1992 EXPORT_SYMBOL_GPL(disable_percpu_irq);
1993
1994 /*
1995  * Internal function to unregister a percpu irqaction.
1996  */
1997 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1998 {
1999         struct irq_desc *desc = irq_to_desc(irq);
2000         struct irqaction *action;
2001         unsigned long flags;
2002
2003         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2004
2005         if (!desc)
2006                 return NULL;
2007
2008         raw_spin_lock_irqsave(&desc->lock, flags);
2009
2010         action = desc->action;
2011         if (!action || action->percpu_dev_id != dev_id) {
2012                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2013                 goto bad;
2014         }
2015
2016         if (!cpumask_empty(desc->percpu_enabled)) {
2017                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2018                      irq, cpumask_first(desc->percpu_enabled));
2019                 goto bad;
2020         }
2021
2022         /* Found it - now remove it from the list of entries: */
2023         desc->action = NULL;
2024
2025         raw_spin_unlock_irqrestore(&desc->lock, flags);
2026
2027         unregister_handler_proc(irq, action);
2028
2029         irq_chip_pm_put(&desc->irq_data);
2030         module_put(desc->owner);
2031         return action;
2032
2033 bad:
2034         raw_spin_unlock_irqrestore(&desc->lock, flags);
2035         return NULL;
2036 }
2037
2038 /**
2039  *      remove_percpu_irq - free a per-cpu interrupt
2040  *      @irq: Interrupt line to free
2041  *      @act: irqaction for the interrupt
2042  *
2043  * Used to remove interrupts statically set up by the early boot process.
2044  */
2045 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2046 {
2047         struct irq_desc *desc = irq_to_desc(irq);
2048
2049         if (desc && irq_settings_is_per_cpu_devid(desc))
2050                 __free_percpu_irq(irq, act->percpu_dev_id);
2051 }
2052
2053 /**
2054  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
2055  *      @irq: Interrupt line to free
2056  *      @dev_id: Device identity to free
2057  *
2058  *      Remove a percpu interrupt handler. The handler is removed, but
2059  *      the interrupt line is not disabled. This must be done on each
2060  *      CPU before calling this function. The function does not return
2061  *      until any executing interrupts for this IRQ have completed.
2062  *
2063  *      This function must not be called from interrupt context.
2064  */
2065 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2066 {
2067         struct irq_desc *desc = irq_to_desc(irq);
2068
2069         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2070                 return;
2071
2072         chip_bus_lock(desc);
2073         kfree(__free_percpu_irq(irq, dev_id));
2074         chip_bus_sync_unlock(desc);
2075 }
2076 EXPORT_SYMBOL_GPL(free_percpu_irq);
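/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): as noted above, the line has to be disabled on every CPU before
 * free_percpu_irq() is called; here that is done with an IPI to each
 * online CPU. my_percpu_irq and my_percpu_data are hypothetical.
 */
static unsigned int my_percpu_irq;              /* hypothetical, set at probe time */
static void __percpu *my_percpu_data;           /* hypothetical, from alloc_percpu() */

static void my_driver_disable_on_cpu(void *info)
{
        disable_percpu_irq(my_percpu_irq);
}

static void my_driver_shutdown(void)
{
        /* Disable the per-CPU interrupt on every online CPU, waiting for each. */
        on_each_cpu(my_driver_disable_on_cpu, NULL, 1);

        free_percpu_irq(my_percpu_irq, my_percpu_data);
        free_percpu(my_percpu_data);
}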
2077
2078 /**
2079  *      setup_percpu_irq - setup a per-cpu interrupt
2080  *      @irq: Interrupt line to setup
2081  *      @act: irqaction for the interrupt
2082  *
2083  * Used to statically set up per-cpu interrupts in the early boot process.
2084  */
2085 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2086 {
2087         struct irq_desc *desc = irq_to_desc(irq);
2088         int retval;
2089
2090         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2091                 return -EINVAL;
2092
2093         retval = irq_chip_pm_get(&desc->irq_data);
2094         if (retval < 0)
2095                 return retval;
2096
2097         retval = __setup_irq(irq, desc, act);
2098
2099         if (retval)
2100                 irq_chip_pm_put(&desc->irq_data);
2101
2102         return retval;
2103 }
2104
2105 /**
2106  *      __request_percpu_irq - allocate a percpu interrupt line
2107  *      @irq: Interrupt line to allocate
2108  *      @handler: Function to be called when the IRQ occurs.
2109  *      @flags: Interrupt type flags (IRQF_TIMER only)
2110  *      @devname: An ascii name for the claiming device
2111  *      @dev_id: A percpu cookie passed back to the handler function
2112  *
2113  *      This call allocates interrupt resources and enables the
2114  *      interrupt on the local CPU. If the interrupt is supposed to be
2115  *      enabled on other CPUs, it has to be done on each CPU using
2116  *      enable_percpu_irq().
2117  *
2118  *      Dev_id must be globally unique. It is a per-cpu variable, and
2119  *      the handler gets called with the interrupted CPU's instance of
2120  *      that variable.
2121  */
2122 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2123                          unsigned long flags, const char *devname,
2124                          void __percpu *dev_id)
2125 {
2126         struct irqaction *action;
2127         struct irq_desc *desc;
2128         int retval;
2129
2130         if (!dev_id)
2131                 return -EINVAL;
2132
2133         desc = irq_to_desc(irq);
2134         if (!desc || !irq_settings_can_request(desc) ||
2135             !irq_settings_is_per_cpu_devid(desc))
2136                 return -EINVAL;
2137
2138         if (flags && flags != IRQF_TIMER)
2139                 return -EINVAL;
2140
2141         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2142         if (!action)
2143                 return -ENOMEM;
2144
2145         action->handler = handler;
2146         action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2147         action->name = devname;
2148         action->percpu_dev_id = dev_id;
2149
2150         retval = irq_chip_pm_get(&desc->irq_data);
2151         if (retval < 0) {
2152                 kfree(action);
2153                 return retval;
2154         }
2155
2156         retval = __setup_irq(irq, desc, action);
2157
2158         if (retval) {
2159                 irq_chip_pm_put(&desc->irq_data);
2160                 kfree(action);
2161         }
2162
2163         return retval;
2164 }
2165 EXPORT_SYMBOL_GPL(__request_percpu_irq);
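/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): most callers go through the request_percpu_irq() wrapper, i.e.
 * flags == 0. The dev_id is a per-cpu allocation and each handler
 * invocation receives the invoking CPU's instance of it. The names
 * starting with my_ are hypothetical.
 */
struct my_percpu_state {
        unsigned long count;
};

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
        /* dev_id already points at this CPU's instance. */
        struct my_percpu_state *st = dev_id;

        st->count++;
        return IRQ_HANDLED;
}

static int my_driver_init_percpu_irq(unsigned int irq)
{
        struct my_percpu_state __percpu *st;
        int ret;

        st = alloc_percpu(struct my_percpu_state);
        if (!st)
                return -ENOMEM;

        ret = request_percpu_irq(irq, my_percpu_handler, "my-percpu", st);
        if (ret) {
                free_percpu(st);
                return ret;
        }

        /* Still has to be enabled on each CPU via enable_percpu_irq(). */
        return 0;
}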
2166
2167 /**
2168  *      irq_get_irqchip_state - returns the irqchip state of an interrupt.
2169  *      @irq: Interrupt line that is forwarded to a VM
2170  *      @which: One of IRQCHIP_STATE_* the caller wants to know about
2171  *      @state: a pointer to a boolean where the state is to be stored
2172  *
2173  *      This call snapshots the internal irqchip state of an
2174  *      interrupt, returning into @state the bit corresponding to
2175  *      state @which
2176  *
2177  *      This function should be called with preemption disabled if the
2178  *      interrupt controller has per-cpu registers.
2179  */
2180 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2181                           bool *state)
2182 {
2183         struct irq_desc *desc;
2184         struct irq_data *data;
2185         struct irq_chip *chip;
2186         unsigned long flags;
2187         int err = -EINVAL;
2188
2189         desc = irq_get_desc_buslock(irq, &flags, 0);
2190         if (!desc)
2191                 return err;
2192
2193         data = irq_desc_get_irq_data(desc);
2194
2195         do {
2196                 chip = irq_data_get_irq_chip(data);
2197                 if (chip->irq_get_irqchip_state)
2198                         break;
2199 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2200                 data = data->parent_data;
2201 #else
2202                 data = NULL;
2203 #endif
2204         } while (data);
2205
2206         if (data)
2207                 err = chip->irq_get_irqchip_state(data, which, state);
2208
2209         irq_put_desc_busunlock(desc, flags);
2210         return err;
2211 }
2212 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
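/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): peeking at the pending bit of an interrupt that is forwarded to
 * a guest, e.g. before deciding whether its state must be migrated. The
 * pr_info() is only for illustration.
 */
static void my_report_pending(unsigned int irq)
{
        bool pending;

        if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
                pr_info("irq %u is%s pending at the irqchip\n",
                        irq, pending ? "" : " not");
}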
2213
2214 /**
2215  *      irq_set_irqchip_state - set the state of a forwarded interrupt.
2216  *      @irq: Interrupt line that is forwarded to a VM
2217  *      @which: State to be restored (one of IRQCHIP_STATE_*)
2218  *      @val: Value corresponding to @which
2219  *
2220  *      This call sets the internal irqchip state of an interrupt,
2221  *      depending on the value of @which.
2222  *
2223  *      This function should be called with preemption disabled if the
2224  *      interrupt controller has per-cpu registers.
2225  */
2226 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2227                           bool val)
2228 {
2229         struct irq_desc *desc;
2230         struct irq_data *data;
2231         struct irq_chip *chip;
2232         unsigned long flags;
2233         int err = -EINVAL;
2234
2235         desc = irq_get_desc_buslock(irq, &flags, 0);
2236         if (!desc)
2237                 return err;
2238
2239         data = irq_desc_get_irq_data(desc);
2240
2241         do {
2242                 chip = irq_data_get_irq_chip(data);
2243                 if (chip->irq_set_irqchip_state)
2244                         break;
2245 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2246                 data = data->parent_data;
2247 #else
2248                 data = NULL;
2249 #endif
2250         } while (data);
2251
2252         if (data)
2253                 err = chip->irq_set_irqchip_state(data, which, val);
2254
2255         irq_put_desc_busunlock(desc, flags);
2256         return err;
2257 }
2258 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
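/*
 * Illustrative usage sketch (editor's addition, not part of the genirq
 * code): restoring the pending state of a forwarded interrupt, the
 * counterpart of the irq_get_irqchip_state() snapshot above, e.g. when a
 * virtual CPU is resumed on this host CPU.
 */
static int my_restore_pending(unsigned int irq, bool was_pending)
{
        /* Re-inject (or clear) the pending bit directly in the irqchip. */
        return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, was_pending);
}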