kernel/irq/manage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 *
 *	It does not check whether there is an interrupt in flight at the
 *	hardware level, but not serviced yet, as this might deadlock when
 *	called with interrupts disabled and the target CPU of the interrupt
 *	is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	an interrupt thread is associated to @irq.
 *
 *	It optionally makes sure (when the irq chip supports that method)
 *	that the interrupt is not pending in any CPU and waiting for
 *	service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
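
/*
 * Usage sketch (editor's addition, not part of the original file): a
 * typical quiesce sequence in a driver teardown path; fd and fd->irq
 * are hypothetical.
 *
 *	disable_irq_nosync(fd->irq);	// keep new invocations out
 *	synchronize_irq(fd->irq);	// wait for hardirq + thread to finish
 *	// the handler is now guaranteed not to be running
 *
 * free_irq() performs an equivalent synchronization internally, so this
 * is only needed while the interrupt stays requested.
 */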

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}

static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask)
{
	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;

		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
	}
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_init_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask, force))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
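
/*
 * Usage sketch (editor's addition, not part of the original file): a
 * multi-queue driver hinting one queue interrupt per online CPU;
 * queue_irq[] and cpu are hypothetical.
 *
 *	for_each_online_cpu(cpu)
 *		irq_set_affinity_hint(queue_irq[cpu], cpumask_of(cpu));
 *
 * Clear the hint before freeing the interrupt so no stale pointer is
 * left behind:
 *
 *	irq_set_affinity_hint(queue_irq[cpu], NULL);
 */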

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
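
/*
 * Usage sketch (editor's addition, not part of the original file):
 * only the notify/release function pointers must be set by the caller;
 * the remaining fields are filled in above. foo_affinity_notify() and
 * foo_affinity_release() are hypothetical.
 *
 *	fd->notify.notify = foo_affinity_notify;
 *	fd->notify.release = foo_affinity_release;
 *	ret = irq_set_affinity_notifier(fd->irq, &fd->notify);
 *
 * Pass NULL to disable notification again, which must happen before
 * free_irq().
 */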

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *		    specific data for percpu_devid interrupts
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	an interrupt thread is associated to @irq.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
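
/*
 * Usage sketch (editor's addition, not part of the original file):
 * disables nest, so every disable_irq() needs a matching enable_irq()
 * before the line is serviced again. fd is hypothetical.
 *
 *	disable_irq(fd->irq);	// depth 0 -> 1, line disabled
 *	disable_irq(fd->irq);	// depth 1 -> 2
 *	enable_irq(fd->irq);	// depth 2 -> 1, still disabled
 *	enable_irq(fd->irq);	// depth 1 -> 0, line enabled again
 */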

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
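
/*
 * Usage sketch (editor's addition, not part of the original file):
 * optimistic disable from atomic context. If disable_hardirq() returns
 * false a threaded handler is still running and the caller has to fall
 * back to a path that may sleep. fd is hypothetical.
 *
 *	if (disable_hardirq(fd->irq)) {
 *		// no hard irq handler runs and no thread is active
 *		...poke the hardware...
 *		enable_irq(fd->irq);
 *	}
 */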

/**
 *	disable_nmi_nosync - disable an nmi without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables are
 *	nested.
 *	The interrupt to disable must have been requested through request_nmi.
 *	Unlike disable_nmi(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 *	are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 *	enable_nmi - enable handling of an nmi
 *	@irq: Interrupt to enable
 *
 *	The interrupt to enable must have been requested through request_nmi.
 *	Undoes the effect of one call to disable_nmi(). If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 *
 *	Note: irq enable/disable state is completely orthogonal
 *	to the enable/disable state of irq wake. An irq can be
 *	disabled with disable_irq() and still wake the system as
 *	long as the irq has wake enabled. If this does not hold,
 *	then the underlying irq chip and the related driver need
 *	to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
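
/*
 * Usage sketch (editor's addition, not part of the original file):
 * drivers normally reach this function through the enable_irq_wake()/
 * disable_irq_wake() wrappers, typically from their PM callbacks.
 * foo_suspend()/foo_resume() and struct foo_dev are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(fd->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(fd->irq);
 *		return 0;
 *	}
 */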

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask it if the interrupt has not been
 * disabled and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and mask the line, then leave
	 * due to IRQS_INPROGRESS, and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	sched_set_fifo(current);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, TWA_NONE);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			migrate_disable();
			add_interrupt_randomness(action->irq, 0,
				 desc->random_ip ^ (unsigned long) action);
			migrate_enable();
		}
		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
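
/*
 * Usage sketch (editor's addition, not part of the original file):
 * kicking the threaded handler from some other context, e.g. a
 * watchdog timer that wants the thread to re-examine device state.
 * The dev_id must be the one passed to request_threaded_irq(); fd is
 * hypothetical.
 *
 *	irq_wake_thread(fd->irq, fd);
 */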

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force-thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}
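
/*
 * Illustration (editor's addition, not part of the original file):
 * given "threadirqs" on the kernel command line, a request such as
 *
 *	ret = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", fd);
 *
 * (foo_quick_check() and foo_thread_fn() are hypothetical) is rewritten
 * by irq_setup_forced_threading() above: foo_quick_check() is moved
 * into the primary irq thread, foo_thread_fn() into a secondary thread,
 * the hard interrupt context runs only irq_default_primary_handler(),
 * and IRQF_ONESHOT is enforced so the line stays masked until the
 * thread has run.
 */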
1268
1269 static int irq_request_resources(struct irq_desc *desc)
1270 {
1271         struct irq_data *d = &desc->irq_data;
1272         struct irq_chip *c = d->chip;
1273
1274         return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1275 }
1276
1277 static void irq_release_resources(struct irq_desc *desc)
1278 {
1279         struct irq_data *d = &desc->irq_data;
1280         struct irq_chip *c = d->chip;
1281
1282         if (c->irq_release_resources)
1283                 c->irq_release_resources(d);
1284 }
1285
1286 static bool irq_supports_nmi(struct irq_desc *desc)
1287 {
1288         struct irq_data *d = irq_desc_get_irq_data(desc);
1289
1290 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1291         /* Only IRQs directly managed by the root irqchip can be set as NMI */
1292         if (d->parent_data)
1293                 return false;
1294 #endif
1295         /* Don't support NMIs for chips behind a slow bus */
1296         if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1297                 return false;
1298
1299         return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1300 }
1301
1302 static int irq_nmi_setup(struct irq_desc *desc)
1303 {
1304         struct irq_data *d = irq_desc_get_irq_data(desc);
1305         struct irq_chip *c = d->chip;
1306
1307         return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1308 }
1309
1310 static void irq_nmi_teardown(struct irq_desc *desc)
1311 {
1312         struct irq_data *d = irq_desc_get_irq_data(desc);
1313         struct irq_chip *c = d->chip;
1314
1315         if (c->irq_nmi_teardown)
1316                 c->irq_nmi_teardown(d);
1317 }
1318
1319 static int
1320 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1321 {
1322         struct task_struct *t;
1323
1324         if (!secondary) {
1325                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1326                                    new->name);
1327         } else {
1328                 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1329                                    new->name);
1330         }
1331
1332         if (IS_ERR(t))
1333                 return PTR_ERR(t);
1334
1335         /*
1336          * We keep the reference to the task struct even if
1337          * the thread dies to avoid that the interrupt code
1338          * references an already freed task_struct.
1339          */
1340         new->thread = get_task_struct(t);
1341         /*
1342          * Tell the thread to set its affinity. This is
1343          * important for shared interrupt handlers as we do
1344          * not invoke setup_affinity() for the secondary
1345          * handlers as everything is already set up. Even for
1346          * interrupts marked with IRQF_NO_BALANCE this is
1347          * correct as we want the thread to move to the cpu(s)
1348          * on which the requesting code placed the interrupt.
1349          */
1350         set_bit(IRQTF_AFFINITY, &new->thread_flags);
1351         return 0;
1352 }
1353
1354 /*
1355  * Internal function to register an irqaction - typically used to
1356  * allocate special interrupts that are part of the architecture.
1357  *
1358  * Locking rules:
1359  *
1360  * desc->request_mutex  Provides serialization against a concurrent free_irq()
1361  *   chip_bus_lock      Provides serialization for slow bus operations
1362  *     desc->lock       Provides serialization against hard interrupts
1363  *
1364  * chip_bus_lock and desc->lock are sufficient for all other management and
1365  * interrupt related functions. desc->request_mutex solely serializes
1366  * request/free_irq().
1367  */
1368 static int
1369 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1370 {
1371         struct irqaction *old, **old_ptr;
1372         unsigned long flags, thread_mask = 0;
1373         int ret, nested, shared = 0;
1374
1375         if (!desc)
1376                 return -EINVAL;
1377
1378         if (desc->irq_data.chip == &no_irq_chip)
1379                 return -ENOSYS;
1380         if (!try_module_get(desc->owner))
1381                 return -ENODEV;
1382
1383         new->irq = irq;
1384
1385         /*
1386          * If the trigger type is not specified by the caller,
1387          * then use the default for this interrupt.
1388          */
1389         if (!(new->flags & IRQF_TRIGGER_MASK))
1390                 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1391
1392         /*
1393          * Check whether the interrupt nests into another interrupt
1394          * thread.
1395          */
1396         nested = irq_settings_is_nested_thread(desc);
1397         if (nested) {
1398                 if (!new->thread_fn) {
1399                         ret = -EINVAL;
1400                         goto out_mput;
1401                 }
1402                 /*
1403                  * Replace the primary handler which was provided from
1404                  * the driver for non nested interrupt handling by the
1405                  * dummy function which warns when called.
1406                  */
1407                 new->handler = irq_nested_primary_handler;
1408         } else {
1409                 if (irq_settings_can_thread(desc)) {
1410                         ret = irq_setup_forced_threading(new);
1411                         if (ret)
1412                                 goto out_mput;
1413                 }
1414         }
1415
1416         /*
1417          * Create a handler thread when a thread function is supplied
1418          * and the interrupt does not nest into another interrupt
1419          * thread.
1420          */
1421         if (new->thread_fn && !nested) {
1422                 ret = setup_irq_thread(new, irq, false);
1423                 if (ret)
1424                         goto out_mput;
1425                 if (new->secondary) {
1426                         ret = setup_irq_thread(new->secondary, irq, true);
1427                         if (ret)
1428                                 goto out_thread;
1429                 }
1430         }
1431
1432         /*
1433          * Drivers are often written to work w/o knowledge about the
1434          * underlying irq chip implementation, so a request for a
1435          * threaded irq without a primary hard irq context handler
1436          * requires the ONESHOT flag to be set. Some irq chips like
1437          * MSI based interrupts are per se one shot safe. Check the
1438          * chip flags, so we can avoid the unmask dance at the end of
1439          * the threaded handler for those.
1440          */
1441         if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1442                 new->flags &= ~IRQF_ONESHOT;
1443
1444         /*
1445          * Protects against a concurrent __free_irq() call which might wait
1446          * for synchronize_hardirq() to complete without holding the optional
1447          * chip bus lock and desc->lock. Also protects against handing out
1448          * a recycled oneshot thread_mask bit while it's still in use by
1449          * its previous owner.
1450          */
1451         mutex_lock(&desc->request_mutex);
1452
1453         /*
1454          * Acquire bus lock as the irq_request_resources() callback below
1455          * might rely on the serialization or the magic power management
1456          * functions which are abusing the irq_bus_lock() callback,
1457          */
1458         chip_bus_lock(desc);
1459
1460         /* First installed action requests resources. */
1461         if (!desc->action) {
1462                 ret = irq_request_resources(desc);
1463                 if (ret) {
1464                         pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1465                                new->name, irq, desc->irq_data.chip->name);
1466                         goto out_bus_unlock;
1467                 }
1468         }
1469
1470         /*
1471          * The following block of code has to be executed atomically
1472          * protected against a concurrent interrupt and any of the other
1473          * management calls which are not serialized via
1474          * desc->request_mutex or the optional bus lock.
1475          */
1476         raw_spin_lock_irqsave(&desc->lock, flags);
1477         old_ptr = &desc->action;
1478         old = *old_ptr;
1479         if (old) {
1480                 /*
1481                  * Can't share interrupts unless both agree to and are
1482                  * the same type (level, edge, polarity). So both flag
1483                  * fields must have IRQF_SHARED set and the bits which
1484                  * set the trigger type must match. Also all must
1485                  * agree on ONESHOT.
1486                  * Interrupt lines used for NMIs cannot be shared.
1487                  */
1488                 unsigned int oldtype;
1489
1490                 if (desc->istate & IRQS_NMI) {
1491                         pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1492                                 new->name, irq, desc->irq_data.chip->name);
1493                         ret = -EINVAL;
1494                         goto out_unlock;
1495                 }
1496
1497                 /*
1498                  * If nobody did set the configuration before, inherit
1499                  * the one provided by the requester.
1500                  */
1501                 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1502                         oldtype = irqd_get_trigger_type(&desc->irq_data);
1503                 } else {
1504                         oldtype = new->flags & IRQF_TRIGGER_MASK;
1505                         irqd_set_trigger_type(&desc->irq_data, oldtype);
1506                 }
1507
1508                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1509                     (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1510                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
1511                         goto mismatch;
1512
1513                 /* All handlers must agree on per-cpuness */
1514                 if ((old->flags & IRQF_PERCPU) !=
1515                     (new->flags & IRQF_PERCPU))
1516                         goto mismatch;
1517
1518                 /* add new interrupt at end of irq queue */
1519                 do {
1520                         /*
1521                          * Or all existing action->thread_mask bits,
1522                          * so we can find the next zero bit for this
1523                          * new action.
1524                          */
1525                         thread_mask |= old->thread_mask;
1526                         old_ptr = &old->next;
1527                         old = *old_ptr;
1528                 } while (old);
1529                 shared = 1;
1530         }
1531
1532         /*
1533          * Set up the thread mask for this irqaction for ONESHOT. For
1534          * !ONESHOT irqs the thread mask is 0 so we can avoid a
1535          * conditional in irq_wake_thread().
1536          */
1537         if (new->flags & IRQF_ONESHOT) {
1538                 /*
1539                  * Unlikely to have 32 (or 64 on 64-bit) irqs sharing
1540                  * one line, but who knows.
1541                  */
1542                 if (thread_mask == ~0UL) {
1543                         ret = -EBUSY;
1544                         goto out_unlock;
1545                 }
1546                 /*
1547                  * The thread_mask for the action is or'ed to
1548                  * desc->threads_active to indicate that the
1549                  * IRQF_ONESHOT thread handler has been woken, but not
1550                  * yet finished. The bit is cleared when a thread
1551                  * completes. When all threads of a shared interrupt
1552                  * line have completed, desc->threads_active becomes
1553                  * zero and the interrupt line is unmasked. See
1554                  * handle.c:irq_wake_thread() for further information.
1555                  *
1556                  * If no thread is woken by primary (hard irq context)
1557                  * interrupt handlers, then desc->threads_active is
1558                  * also checked for zero to unmask the irq line in the
1559                  * affected hard irq flow handlers
1560                  * (handle_[fasteoi|level]_irq).
1561                  *
1562                  * The new action gets the first zero bit of
1563                  * thread_mask assigned. See the loop above which or's
1564                  * all existing action->thread_mask bits.
1565                  */
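		/*
		 * Worked example (illustrative): with two oneshot actions
		 * already installed, thread_mask is 0x3, ffz() returns 2 and
		 * the new action below gets thread_mask = 0x4.
		 */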
1566                 new->thread_mask = 1UL << ffz(thread_mask);
1567
1568         } else if (new->handler == irq_default_primary_handler &&
1569                    !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1570                 /*
1571                  * The interrupt was requested with handler = NULL, so
1572                  * we use the default primary handler for it. But it
1573                  * does not have the oneshot flag set. In combination
1574                  * with level interrupts this is deadly, because the
1575                  * default primary handler just wakes the thread, then
1576                  * the irq line is re-enabled, but the device still
1577                  * has the level irq asserted. Rinse and repeat....
1578                  *
1579                  * While this works for edge type interrupts, we play
1580                  * it safe and reject unconditionally because we can't
1581                  * say for sure which type this interrupt really
1582                  * has. The type flags are unreliable as the
1583                  * underlying chip implementation can override them.
1584                  */
1585                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1586                        new->name, irq);
1587                 ret = -EINVAL;
1588                 goto out_unlock;
1589         }
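	/*
	 * Illustrative note: the safe request shape for a purely threaded
	 * handler is therefore request_threaded_irq(irq, NULL, thread_fn,
	 * IRQF_ONESHOT, name, dev), unless the chip is IRQCHIP_ONESHOT_SAFE.
	 */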
1590
1591         if (!shared) {
1592                 init_waitqueue_head(&desc->wait_for_threads);
1593
1594                 /* Set up the trigger type (level, edge, polarity) if configured: */
1595                 if (new->flags & IRQF_TRIGGER_MASK) {
1596                         ret = __irq_set_trigger(desc,
1597                                                 new->flags & IRQF_TRIGGER_MASK);
1598
1599                         if (ret)
1600                                 goto out_unlock;
1601                 }
1602
1603                 /*
1604                  * Activate the interrupt. That activation must happen
1605                  * independently of IRQ_NOAUTOEN. request_irq() can fail
1606                  * and the callers are supposed to handle
1607                  * that. enable_irq() of an interrupt requested with
1608                  * IRQ_NOAUTOEN is not supposed to fail. The activation
1609                  * keeps it in shutdown mode, it merely associates
1610                  * resources if necessary and if that's not possible it
1611                  * fails. Interrupts which are in managed shutdown mode
1612                  * will simply ignore that activation request.
1613                  */
1614                 ret = irq_activate(desc);
1615                 if (ret)
1616                         goto out_unlock;
1617
1618         desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
1619                                   IRQS_ONESHOT | IRQS_WAITING);
1620                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1621
1622                 if (new->flags & IRQF_PERCPU) {
1623                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1624                         irq_settings_set_per_cpu(desc);
1625                 }
1626
1627                 if (new->flags & IRQF_ONESHOT)
1628                         desc->istate |= IRQS_ONESHOT;
1629
1630                 /* Exclude IRQ from balancing if requested */
1631                 if (new->flags & IRQF_NOBALANCING) {
1632                         irq_settings_set_no_balancing(desc);
1633                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1634                 }
1635
1636                 if (irq_settings_can_autoenable(desc)) {
1637                         irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1638                 } else {
1639                         /*
1640                          * Shared interrupts do not go well with disabling
1641                          * auto enable. A sharing partner might request the
1642                          * line while it is still disabled and then wait for
1643                          * interrupts forever.
1644                          */
1645                         WARN_ON_ONCE(new->flags & IRQF_SHARED);
1646                         /* Undo nested disables: */
1647                         desc->depth = 1;
1648                 }
1649
1650         } else if (new->flags & IRQF_TRIGGER_MASK) {
1651                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1652                 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1653
1654                 if (nmsk != omsk)
1655                         /* Hope the handler works with the current trigger mode */
1656                         pr_warn("irq %d uses trigger mode %u; requested %u\n",
1657                                 irq, omsk, nmsk);
1658         }
1659
1660         *old_ptr = new;
1661
1662         irq_pm_install_action(desc, new);
1663
1664         /* Reset broken irq detection when installing new handler */
1665         desc->irq_count = 0;
1666         desc->irqs_unhandled = 0;
1667
1668         /*
1669          * Check whether we disabled the irq via the spurious handler
1670          * before. Re-enable it and give it another chance.
1671          */
1672         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1673                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1674                 __enable_irq(desc);
1675         }
1676
1677         raw_spin_unlock_irqrestore(&desc->lock, flags);
1678         chip_bus_sync_unlock(desc);
1679         mutex_unlock(&desc->request_mutex);
1680
1681         irq_setup_timings(desc, new);
1682
1683         /*
1684          * Strictly speaking there is no need to wake it up, but hung_task
1685          * complains when no hard interrupt wakes the thread up.
1686          */
1687         if (new->thread)
1688                 wake_up_process(new->thread);
1689         if (new->secondary)
1690                 wake_up_process(new->secondary->thread);
1691
1692         register_irq_proc(irq, desc);
1693         new->dir = NULL;
1694         register_handler_proc(irq, new);
1695         return 0;
1696
1697 mismatch:
1698         if (!(new->flags & IRQF_PROBE_SHARED)) {
1699                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1700                        irq, new->flags, new->name, old->flags, old->name);
1701 #ifdef CONFIG_DEBUG_SHIRQ
1702                 dump_stack();
1703 #endif
1704         }
1705         ret = -EBUSY;
1706
1707 out_unlock:
1708         raw_spin_unlock_irqrestore(&desc->lock, flags);
1709
1710         if (!desc->action)
1711                 irq_release_resources(desc);
1712 out_bus_unlock:
1713         chip_bus_sync_unlock(desc);
1714         mutex_unlock(&desc->request_mutex);
1715
1716 out_thread:
1717         if (new->thread) {
1718                 struct task_struct *t = new->thread;
1719
1720                 new->thread = NULL;
1721                 kthread_stop(t);
1722                 put_task_struct(t);
1723         }
1724         if (new->secondary && new->secondary->thread) {
1725                 struct task_struct *t = new->secondary->thread;
1726
1727                 new->secondary->thread = NULL;
1728                 kthread_stop(t);
1729                 put_task_struct(t);
1730         }
1731 out_mput:
1732         module_put(desc->owner);
1733         return ret;
1734 }
1735
1736 /*
1737  * Internal function to unregister an irqaction - used to free
1738  * regular and special interrupts that are part of the architecture.
1739  */
1740 static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1741 {
1742         unsigned int irq = desc->irq_data.irq;
1743         struct irqaction *action, **action_ptr;
1744         unsigned long flags;
1745
1746         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1747
1748         mutex_lock(&desc->request_mutex);
1749         chip_bus_lock(desc);
1750         raw_spin_lock_irqsave(&desc->lock, flags);
1751
1752         /*
1753          * There can be multiple actions per IRQ descriptor, find the right
1754          * one based on the dev_id:
1755          */
1756         action_ptr = &desc->action;
1757         for (;;) {
1758                 action = *action_ptr;
1759
1760                 if (!action) {
1761                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
1762                         raw_spin_unlock_irqrestore(&desc->lock, flags);
1763                         chip_bus_sync_unlock(desc);
1764                         mutex_unlock(&desc->request_mutex);
1765                         return NULL;
1766                 }
1767
1768                 if (action->dev_id == dev_id)
1769                         break;
1770                 action_ptr = &action->next;
1771         }
1772
1773         /* Found it - now remove it from the list of entries: */
1774         *action_ptr = action->next;
1775
1776         irq_pm_remove_action(desc, action);
1777
1778         /* If this was the last handler, shut down the IRQ line: */
1779         if (!desc->action) {
1780                 irq_settings_clr_disable_unlazy(desc);
1781                 /* Only shutdown. Deactivate after synchronize_hardirq() */
1782                 irq_shutdown(desc);
1783         }
1784
1785 #ifdef CONFIG_SMP
1786         /* make sure affinity_hint is cleaned up */
1787         if (WARN_ON_ONCE(desc->affinity_hint))
1788                 desc->affinity_hint = NULL;
1789 #endif
1790
1791         raw_spin_unlock_irqrestore(&desc->lock, flags);
1792         /*
1793          * Drop bus_lock here so the changes which were done in the chip
1794          * callbacks above are synced out to the irq chips which hang
1795          * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1796          *
1797          * Aside from that, the bus_lock can also be taken from the threaded
1798          * handler in irq_finalize_oneshot() which results in a deadlock
1799          * because kthread_stop() would wait forever for the thread to
1800          * complete, which is blocked on the bus lock.
1801          *
1802          * The still held desc->request_mutex protects against a
1803          * concurrent request_irq() of this irq so the release of resources
1804          * and timing data is properly serialized.
1805          */
1806         chip_bus_sync_unlock(desc);
1807
1808         unregister_handler_proc(irq, action);
1809
1810         /*
1811          * Make sure it's not being used on another CPU and if the chip
1812          * supports it also make sure that there is no (not yet serviced)
1813          * interrupt in flight at the hardware level.
1814          */
1815         __synchronize_hardirq(desc, true);
1816
1817 #ifdef CONFIG_DEBUG_SHIRQ
1818         /*
1819          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1820          * event to happen even while it is being freed, so let's make sure
1821          * that is so by doing an extra call to the handler ....
1822          *
1823          * ( We do this after actually deregistering it, to make sure that a
1824          *   'real' IRQ doesn't run in parallel with our fake. )
1825          */
1826         if (action->flags & IRQF_SHARED) {
1827                 local_irq_save(flags);
1828                 action->handler(irq, dev_id);
1829                 local_irq_restore(flags);
1830         }
1831 #endif
1832
1833         /*
1834          * The action has already been removed above, but the thread writes
1835          * its oneshot mask bit when it completes. However, request_mutex
1836          * is held across this, which prevents __setup_irq() from handing
1837          * out the same bit to a newly requested action.
1838          */
1839         if (action->thread) {
1840                 kthread_stop(action->thread);
1841                 put_task_struct(action->thread);
1842                 if (action->secondary && action->secondary->thread) {
1843                         kthread_stop(action->secondary->thread);
1844                         put_task_struct(action->secondary->thread);
1845                 }
1846         }
1847
1848         /* Last action releases resources */
1849         if (!desc->action) {
1850                 /*
1851                  * Reacquire bus lock as irq_release_resources() might
1852                  * require it to deallocate resources over the slow bus.
1853                  */
1854                 chip_bus_lock(desc);
1855                 /*
1856                  * There is no interrupt in flight anymore. Deactivate it
1857                  * completely.
1858                  */
1859                 raw_spin_lock_irqsave(&desc->lock, flags);
1860                 irq_domain_deactivate_irq(&desc->irq_data);
1861                 raw_spin_unlock_irqrestore(&desc->lock, flags);
1862
1863                 irq_release_resources(desc);
1864                 chip_bus_sync_unlock(desc);
1865                 irq_remove_timings(desc);
1866         }
1867
1868         mutex_unlock(&desc->request_mutex);
1869
1870         irq_chip_pm_put(&desc->irq_data);
1871         module_put(desc->owner);
1872         kfree(action->secondary);
1873         return action;
1874 }
1875
1876 /**
1877  *      free_irq - free an interrupt allocated with request_irq
1878  *      @irq: Interrupt line to free
1879  *      @dev_id: Device identity to free
1880  *
1881  *      Remove an interrupt handler. The handler is removed and if the
1882  *      interrupt line is no longer in use by any driver it is disabled.
1883  *      On a shared IRQ the caller must ensure the interrupt is disabled
1884  *      on the card it drives before calling this function. The function
1885  *      does not return until any executing interrupts for this IRQ
1886  *      have completed.
1887  *
1888  *      This function must not be called from interrupt context.
1889  *
1890  *      Returns the devname argument passed to request_irq.
1891  */
1892 const void *free_irq(unsigned int irq, void *dev_id)
1893 {
1894         struct irq_desc *desc = irq_to_desc(irq);
1895         struct irqaction *action;
1896         const char *devname;
1897
1898         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1899                 return NULL;
1900
1901 #ifdef CONFIG_SMP
1902         if (WARN_ON(desc->affinity_notify))
1903                 desc->affinity_notify = NULL;
1904 #endif
1905
1906         action = __free_irq(desc, dev_id);
1907
1908         if (!action)
1909                 return NULL;
1910
1911         devname = action->name;
1912         kfree(action);
1913         return devname;
1914 }
1915 EXPORT_SYMBOL(free_irq);
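
/*
 * Usage sketch (illustrative, not part of this file): the devname passed to
 * request_irq() is handed back, which callers may use for logging. "dev" is
 * a hypothetical driver cookie:
 *
 *	const void *name = free_irq(irq, dev);
 *
 *	pr_debug("released irq %d (%s)\n", irq, (const char *)name);
 */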
1916
1917 /* This function must be called with desc->lock held */
1918 static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1919 {
1920         const char *devname = NULL;
1921
1922         desc->istate &= ~IRQS_NMI;
1923
1924         if (!WARN_ON(desc->action == NULL)) {
1925                 irq_pm_remove_action(desc, desc->action);
1926                 devname = desc->action->name;
1927                 unregister_handler_proc(irq, desc->action);
1928
1929                 kfree(desc->action);
1930                 desc->action = NULL;
1931         }
1932
1933         irq_settings_clr_disable_unlazy(desc);
1934         irq_shutdown_and_deactivate(desc);
1935
1936         irq_release_resources(desc);
1937
1938         irq_chip_pm_put(&desc->irq_data);
1939         module_put(desc->owner);
1940
1941         return devname;
1942 }
1943
1944 const void *free_nmi(unsigned int irq, void *dev_id)
1945 {
1946         struct irq_desc *desc = irq_to_desc(irq);
1947         unsigned long flags;
1948         const void *devname;
1949
1950         if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1951                 return NULL;
1952
1953         if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1954                 return NULL;
1955
1956         /* NMI still enabled */
1957         if (WARN_ON(desc->depth == 0))
1958                 disable_nmi_nosync(irq);
1959
1960         raw_spin_lock_irqsave(&desc->lock, flags);
1961
1962         irq_nmi_teardown(desc);
1963         devname = __cleanup_nmi(irq, desc);
1964
1965         raw_spin_unlock_irqrestore(&desc->lock, flags);
1966
1967         return devname;
1968 }
1969
1970 /**
1971  *      request_threaded_irq - allocate an interrupt line
1972  *      @irq: Interrupt line to allocate
1973  *      @handler: Function to be called when the IRQ occurs.
1974  *                Primary handler for threaded interrupts
1975  *                If NULL and thread_fn != NULL the default
1976  *                primary handler is installed
1977  *      @thread_fn: Function called from the irq handler thread
1978  *                  If NULL, no irq thread is created
1979  *      @irqflags: Interrupt type flags
1980  *      @devname: An ascii name for the claiming device
1981  *      @dev_id: A cookie passed back to the handler function
1982  *
1983  *      This call allocates interrupt resources and enables the
1984  *      interrupt line and IRQ handling. From the point this
1985  *      call is made your handler function may be invoked. Since
1986  *      your handler function must clear any interrupt the board
1987  *      raises, you must take care both to initialise your hardware
1988  *      and to set up the interrupt handler in the right order.
1989  *
1990  *      If you want to set up a threaded irq handler for your device
1991  *      then you need to supply @handler and @thread_fn. @handler is
1992  *      still called in hard interrupt context and has to check
1993  *      whether the interrupt originates from the device. If yes it
1994  *      needs to disable the interrupt on the device and return
1995  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1996  *      @thread_fn. This split handler design is necessary to support
1997  *      shared interrupts.
1998  *
1999  *      Dev_id must be globally unique. Normally the address of the
2000  *      device data structure is used as the cookie. Since the handler
2001  *      receives this value it makes sense to use it.
2002  *
2003  *      If your interrupt is shared you must pass a non NULL dev_id
2004  *      as this is required when freeing the interrupt.
2005  *
2006  *      Flags:
2007  *
2008  *      IRQF_SHARED             Interrupt is shared
2009  *      IRQF_TRIGGER_*          Specify active edge(s) or level
2010  *
2011  */
2012 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2013                          irq_handler_t thread_fn, unsigned long irqflags,
2014                          const char *devname, void *dev_id)
2015 {
2016         struct irqaction *action;
2017         struct irq_desc *desc;
2018         int retval;
2019
2020         if (irq == IRQ_NOTCONNECTED)
2021                 return -ENOTCONN;
2022
2023         /*
2024          * Sanity-check: shared interrupts must pass in a real dev-ID,
2025          * otherwise we'll have trouble later trying to figure out
2026          * which interrupt is which (messes up the interrupt freeing
2027          * logic etc).
2028          *
2029          * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2030          * it cannot be set along with IRQF_NO_SUSPEND.
2031          */
2032         if (((irqflags & IRQF_SHARED) && !dev_id) ||
2033             (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2034             ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2035                 return -EINVAL;
2036
2037         desc = irq_to_desc(irq);
2038         if (!desc)
2039                 return -EINVAL;
2040
2041         if (!irq_settings_can_request(desc) ||
2042             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2043                 return -EINVAL;
2044
2045         if (!handler) {
2046                 if (!thread_fn)
2047                         return -EINVAL;
2048                 handler = irq_default_primary_handler;
2049         }
2050
2051         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2052         if (!action)
2053                 return -ENOMEM;
2054
2055         action->handler = handler;
2056         action->thread_fn = thread_fn;
2057         action->flags = irqflags;
2058         action->name = devname;
2059         action->dev_id = dev_id;
2060
2061         retval = irq_chip_pm_get(&desc->irq_data);
2062         if (retval < 0) {
2063                 kfree(action);
2064                 return retval;
2065         }
2066
2067         retval = __setup_irq(irq, desc, action);
2068
2069         if (retval) {
2070                 irq_chip_pm_put(&desc->irq_data);
2071                 kfree(action->secondary);
2072                 kfree(action);
2073         }
2074
2075 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
2076         if (!retval && (irqflags & IRQF_SHARED)) {
2077                 /*
2078                  * It's a shared IRQ -- the driver ought to be prepared for it
2079                  * to happen immediately, so let's make sure....
2080                  * We disable the irq to make sure that a 'real' IRQ doesn't
2081                  * run in parallel with our fake.
2082                  */
2083                 unsigned long flags;
2084
2085                 disable_irq(irq);
2086                 local_irq_save(flags);
2087
2088                 handler(irq, dev_id);
2089
2090                 local_irq_restore(flags);
2091                 enable_irq(irq);
2092         }
2093 #endif
2094         return retval;
2095 }
2096 EXPORT_SYMBOL(request_threaded_irq);
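
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * "foo" driver using the split primary/threaded handler design described
 * above. The primary handler runs in hard irq context, quiesces the device
 * and returns IRQ_WAKE_THREAD; the heavy lifting then happens in
 * foo_thread_fn() in process context. struct foo_device and the foo_*()
 * helpers are assumptions made up for this sketch.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_events(foo);
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */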
2097
2098 /**
2099  *      request_any_context_irq - allocate an interrupt line
2100  *      @irq: Interrupt line to allocate
2101  *      @handler: Function to be called when the IRQ occurs.
2102  *                Threaded handler for threaded interrupts.
2103  *      @flags: Interrupt type flags
2104  *      @name: An ascii name for the claiming device
2105  *      @dev_id: A cookie passed back to the handler function
2106  *
2107  *      This call allocates interrupt resources and enables the
2108  *      interrupt line and IRQ handling. It selects either a
2109  *      hardirq or threaded handling method depending on the
2110  *      context.
2111  *
2112  *      On failure, it returns a negative value. On success,
2113  *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2114  */
2115 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2116                             unsigned long flags, const char *name, void *dev_id)
2117 {
2118         struct irq_desc *desc;
2119         int ret;
2120
2121         if (irq == IRQ_NOTCONNECTED)
2122                 return -ENOTCONN;
2123
2124         desc = irq_to_desc(irq);
2125         if (!desc)
2126                 return -EINVAL;
2127
2128         if (irq_settings_is_nested_thread(desc)) {
2129                 ret = request_threaded_irq(irq, NULL, handler,
2130                                            flags, name, dev_id);
2131                 return !ret ? IRQC_IS_NESTED : ret;
2132         }
2133
2134         ret = request_irq(irq, handler, flags, name, dev_id);
2135         return !ret ? IRQC_IS_HARDIRQ : ret;
2136 }
2137 EXPORT_SYMBOL_GPL(request_any_context_irq);
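
/*
 * Usage sketch (illustrative): request_any_context_irq() returns
 * IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success, so callers must check for
 * a negative value rather than for zero. foo_handler/foo are made up:
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 */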
2138
2139 /**
2140  *      request_nmi - allocate an interrupt line for NMI delivery
2141  *      @irq: Interrupt line to allocate
2142  *      @handler: Function to be called when the IRQ occurs.
2143  *                Threaded handler for threaded interrupts.
2144  *      @irqflags: Interrupt type flags
2145  *      @name: An ascii name for the claiming device
2146  *      @dev_id: A cookie passed back to the handler function
2147  *
2148  *      This call allocates interrupt resources and enables the
2149  *      interrupt line and IRQ handling. It sets up the IRQ line
2150  *      to be handled as an NMI.
2151  *
2152  *      An interrupt line delivering NMIs cannot be shared and IRQ handling
2153  *      cannot be threaded.
2154  *
2155  *      Interrupt lines requested for NMI delivery must produce per-cpu
2156  *      interrupts and have the auto enable setting disabled.
2157  *
2158  *      Dev_id must be globally unique. Normally the address of the
2159  *      device data structure is used as the cookie. Since the handler
2160  *      receives this value it makes sense to use it.
2161  *
2162  *      If the interrupt line cannot be used to deliver NMIs, the function
2163  *      will fail and return a negative value.
2164  */
2165 int request_nmi(unsigned int irq, irq_handler_t handler,
2166                 unsigned long irqflags, const char *name, void *dev_id)
2167 {
2168         struct irqaction *action;
2169         struct irq_desc *desc;
2170         unsigned long flags;
2171         int retval;
2172
2173         if (irq == IRQ_NOTCONNECTED)
2174                 return -ENOTCONN;
2175
2176         /* NMIs cannot be shared or used for polling */
2177         if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2178                 return -EINVAL;
2179
2180         if (!(irqflags & IRQF_PERCPU))
2181                 return -EINVAL;
2182
2183         if (!handler)
2184                 return -EINVAL;
2185
2186         desc = irq_to_desc(irq);
2187
2188         if (!desc || irq_settings_can_autoenable(desc) ||
2189             !irq_settings_can_request(desc) ||
2190             WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2191             !irq_supports_nmi(desc))
2192                 return -EINVAL;
2193
2194         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2195         if (!action)
2196                 return -ENOMEM;
2197
2198         action->handler = handler;
2199         action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2200         action->name = name;
2201         action->dev_id = dev_id;
2202
2203         retval = irq_chip_pm_get(&desc->irq_data);
2204         if (retval < 0)
2205                 goto err_out;
2206
2207         retval = __setup_irq(irq, desc, action);
2208         if (retval)
2209                 goto err_irq_setup;
2210
2211         raw_spin_lock_irqsave(&desc->lock, flags);
2212
2213         /* Set up NMI state */
2214         desc->istate |= IRQS_NMI;
2215         retval = irq_nmi_setup(desc);
2216         if (retval) {
2217                 __cleanup_nmi(irq, desc);
2218                 raw_spin_unlock_irqrestore(&desc->lock, flags);
2219                 return -EINVAL;
2220         }
2221
2222         raw_spin_unlock_irqrestore(&desc->lock, flags);
2223
2224         return 0;
2225
2226 err_irq_setup:
2227         irq_chip_pm_put(&desc->irq_data);
2228 err_out:
2229         kfree(action);
2230
2231         return retval;
2232 }
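
/*
 * Usage sketch (illustrative): an NMI line must not be auto-enabled, so a
 * requester typically marks it IRQ_NOAUTOEN first and enables it explicitly
 * once the handler is in place. foo_nmi_handler/foo are made up:
 *
 *	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 *	err = request_nmi(irq, foo_nmi_handler, IRQF_PERCPU, "foo-nmi", foo);
 *	if (!err)
 *		enable_nmi(irq);
 */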
2233
2234 void enable_percpu_irq(unsigned int irq, unsigned int type)
2235 {
2236         unsigned int cpu = smp_processor_id();
2237         unsigned long flags;
2238         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2239
2240         if (!desc)
2241                 return;
2242
2243         /*
2244          * If the trigger type is not specified by the caller, then
2245          * use the default for this interrupt.
2246          */
2247         type &= IRQ_TYPE_SENSE_MASK;
2248         if (type == IRQ_TYPE_NONE)
2249                 type = irqd_get_trigger_type(&desc->irq_data);
2250
2251         if (type != IRQ_TYPE_NONE) {
2252                 int ret;
2253
2254                 ret = __irq_set_trigger(desc, type);
2255
2256                 if (ret) {
2257                         WARN(1, "failed to set type for IRQ%d\n", irq);
2258                         goto out;
2259                 }
2260         }
2261
2262         irq_percpu_enable(desc, cpu);
2263 out:
2264         irq_put_desc_unlock(desc, flags);
2265 }
2266 EXPORT_SYMBOL_GPL(enable_percpu_irq);
2267
2268 void enable_percpu_nmi(unsigned int irq, unsigned int type)
2269 {
2270         enable_percpu_irq(irq, type);
2271 }
2272
2273 /**
2274  * irq_percpu_is_enabled - Check whether the per-cpu irq is enabled
2275  * @irq:        Linux irq number to check for
2276  *
2277  * Must be called from a non-migratable context. Returns the enable
2278  * state of a per cpu interrupt on the current cpu.
2279  */
2280 bool irq_percpu_is_enabled(unsigned int irq)
2281 {
2282         unsigned int cpu = smp_processor_id();
2283         struct irq_desc *desc;
2284         unsigned long flags;
2285         bool is_enabled;
2286
2287         desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2288         if (!desc)
2289                 return false;
2290
2291         is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2292         irq_put_desc_unlock(desc, flags);
2293
2294         return is_enabled;
2295 }
2296 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
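
/*
 * Usage sketch (illustrative): the check is against the current CPU, so
 * callers must prevent migration around the call:
 *
 *	preempt_disable();
 *	enabled = irq_percpu_is_enabled(irq);
 *	preempt_enable();
 */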
2297
2298 void disable_percpu_irq(unsigned int irq)
2299 {
2300         unsigned int cpu = smp_processor_id();
2301         unsigned long flags;
2302         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2303
2304         if (!desc)
2305                 return;
2306
2307         irq_percpu_disable(desc, cpu);
2308         irq_put_desc_unlock(desc, flags);
2309 }
2310 EXPORT_SYMBOL_GPL(disable_percpu_irq);
2311
2312 void disable_percpu_nmi(unsigned int irq)
2313 {
2314         disable_percpu_irq(irq);
2315 }
2316
2317 /*
2318  * Internal function to unregister a percpu irqaction.
2319  */
2320 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2321 {
2322         struct irq_desc *desc = irq_to_desc(irq);
2323         struct irqaction *action;
2324         unsigned long flags;
2325
2326         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2327
2328         if (!desc)
2329                 return NULL;
2330
2331         raw_spin_lock_irqsave(&desc->lock, flags);
2332
2333         action = desc->action;
2334         if (!action || action->percpu_dev_id != dev_id) {
2335                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2336                 goto bad;
2337         }
2338
2339         if (!cpumask_empty(desc->percpu_enabled)) {
2340                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2341                      irq, cpumask_first(desc->percpu_enabled));
2342                 goto bad;
2343         }
2344
2345         /* Found it - now remove it from the list of entries: */
2346         desc->action = NULL;
2347
2348         desc->istate &= ~IRQS_NMI;
2349
2350         raw_spin_unlock_irqrestore(&desc->lock, flags);
2351
2352         unregister_handler_proc(irq, action);
2353
2354         irq_chip_pm_put(&desc->irq_data);
2355         module_put(desc->owner);
2356         return action;
2357
2358 bad:
2359         raw_spin_unlock_irqrestore(&desc->lock, flags);
2360         return NULL;
2361 }
2362
2363 /**
2364  *      remove_percpu_irq - free a per-cpu interrupt
2365  *      @irq: Interrupt line to free
2366  *      @act: irqaction for the interrupt
2367  *
2368  * Used to remove interrupts statically setup by the early boot process.
2369  * Used to remove interrupts statically set up by the early boot process.
2370 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2371 {
2372         struct irq_desc *desc = irq_to_desc(irq);
2373
2374         if (desc && irq_settings_is_per_cpu_devid(desc))
2375                 __free_percpu_irq(irq, act->percpu_dev_id);
2376 }
2377
2378 /**
2379  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
2380  *      @irq: Interrupt line to free
2381  *      @dev_id: Device identity to free
2382  *
2383  *      Remove a percpu interrupt handler. The handler is removed, but
2384  *      the interrupt line is not disabled. This must be done on each
2385  *      CPU before calling this function. The function does not return
2386  *      until any executing interrupts for this IRQ have completed.
2387  *
2388  *      This function must not be called from interrupt context.
2389  */
2390 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2391 {
2392         struct irq_desc *desc = irq_to_desc(irq);
2393
2394         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2395                 return;
2396
2397         chip_bus_lock(desc);
2398         kfree(__free_percpu_irq(irq, dev_id));
2399         chip_bus_sync_unlock(desc);
2400 }
2401 EXPORT_SYMBOL_GPL(free_percpu_irq);
2402
2403 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2404 {
2405         struct irq_desc *desc = irq_to_desc(irq);
2406
2407         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2408                 return;
2409
2410         if (WARN_ON(!(desc->istate & IRQS_NMI)))
2411                 return;
2412
2413         kfree(__free_percpu_irq(irq, dev_id));
2414 }
2415
2416 /**
2417  *      setup_percpu_irq - set up a per-cpu interrupt
2418  *      @irq: Interrupt line to set up
2419  *      @act: irqaction for the interrupt
2420  *
2421  * Used to statically setup per-cpu interrupts in the early boot process.
2422  * Used to statically set up per-cpu interrupts in the early boot process.
2423 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2424 {
2425         struct irq_desc *desc = irq_to_desc(irq);
2426         int retval;
2427
2428         if (!desc || !irq_settings_is_per_cpu_devid(desc))
2429                 return -EINVAL;
2430
2431         retval = irq_chip_pm_get(&desc->irq_data);
2432         if (retval < 0)
2433                 return retval;
2434
2435         retval = __setup_irq(irq, desc, act);
2436
2437         if (retval)
2438                 irq_chip_pm_put(&desc->irq_data);
2439
2440         return retval;
2441 }
2442
2443 /**
2444  *      __request_percpu_irq - allocate a percpu interrupt line
2445  *      @irq: Interrupt line to allocate
2446  *      @handler: Function to be called when the IRQ occurs.
2447  *      @flags: Interrupt type flags (IRQF_TIMER only)
2448  *      @devname: An ascii name for the claiming device
2449  *      @dev_id: A percpu cookie passed back to the handler function
2450  *
2451  *      This call allocates interrupt resources and enables the
2452  *      interrupt on the local CPU. If the interrupt is supposed to be
2453  *      enabled on other CPUs, it has to be done on each CPU using
2454  *      enable_percpu_irq().
2455  *
2456  *      Dev_id must be globally unique. It is a per-cpu variable, and
2457  *      the handler gets called with the interrupted CPU's instance of
2458  *      that variable.
2459  */
2460 int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2461                          unsigned long flags, const char *devname,
2462                          void __percpu *dev_id)
2463 {
2464         struct irqaction *action;
2465         struct irq_desc *desc;
2466         int retval;
2467
2468         if (!dev_id)
2469                 return -EINVAL;
2470
2471         desc = irq_to_desc(irq);
2472         if (!desc || !irq_settings_can_request(desc) ||
2473             !irq_settings_is_per_cpu_devid(desc))
2474                 return -EINVAL;
2475
2476         if (flags && flags != IRQF_TIMER)
2477                 return -EINVAL;
2478
2479         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2480         if (!action)
2481                 return -ENOMEM;
2482
2483         action->handler = handler;
2484         action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2485         action->name = devname;
2486         action->percpu_dev_id = dev_id;
2487
2488         retval = irq_chip_pm_get(&desc->irq_data);
2489         if (retval < 0) {
2490                 kfree(action);
2491                 return retval;
2492         }
2493
2494         retval = __setup_irq(irq, desc, action);
2495
2496         if (retval) {
2497                 irq_chip_pm_put(&desc->irq_data);
2498                 kfree(action);
2499         }
2500
2501         return retval;
2502 }
2503 EXPORT_SYMBOL_GPL(__request_percpu_irq);
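
/*
 * Usage sketch (illustrative): a per-cpu interrupt is requested once with a
 * per-cpu cookie (the request_percpu_irq() wrapper in <linux/interrupt.h>
 * passes flags == 0 into the function above) and is then enabled on each
 * CPU that should receive it. foo_percpu_handler and foo_state are made up:
 *
 *	static DEFINE_PER_CPU(struct foo_state, foo_state);
 *
 *	err = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_state);
 *
 * followed, on each target CPU (e.g. from a CPU hotplug callback), by:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */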
2504
2505 /**
2506  *      request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2507  *      @irq: Interrupt line to allocate
2508  *      @handler: Function to be called when the IRQ occurs.
2509  *      @name: An ascii name for the claiming device
2510  *      @dev_id: A percpu cookie passed back to the handler function
2511  *
2512  *      This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
2513  *      have to be set up on each CPU by calling prepare_percpu_nmi() before
2514  *      being enabled on the same CPU by using enable_percpu_nmi().
2515  *
2516  *      Dev_id must be globally unique. It is a per-cpu variable, and
2517  *      the handler gets called with the interrupted CPU's instance of
2518  *      that variable.
2519  *
2520  *      Interrupt lines requested for NMI delivery should have the auto
2521  *      enable setting disabled.
2522  *
2523  *      If the interrupt line cannot be used to deliver NMIs, the function
2524  *      will fail, returning a negative value.
2525  */
2526 int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2527                        const char *name, void __percpu *dev_id)
2528 {
2529         struct irqaction *action;
2530         struct irq_desc *desc;
2531         unsigned long flags;
2532         int retval;
2533
2534         if (!handler)
2535                 return -EINVAL;
2536
2537         desc = irq_to_desc(irq);
2538
2539         if (!desc || !irq_settings_can_request(desc) ||
2540             !irq_settings_is_per_cpu_devid(desc) ||
2541             irq_settings_can_autoenable(desc) ||
2542             !irq_supports_nmi(desc))
2543                 return -EINVAL;
2544
2545         /* The line cannot already be NMI */
2546         if (desc->istate & IRQS_NMI)
2547                 return -EINVAL;
2548
2549         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2550         if (!action)
2551                 return -ENOMEM;
2552
2553         action->handler = handler;
2554         action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2555                 | IRQF_NOBALANCING;
2556         action->name = name;
2557         action->percpu_dev_id = dev_id;
2558
2559         retval = irq_chip_pm_get(&desc->irq_data);
2560         if (retval < 0)
2561                 goto err_out;
2562
2563         retval = __setup_irq(irq, desc, action);
2564         if (retval)
2565                 goto err_irq_setup;
2566
2567         raw_spin_lock_irqsave(&desc->lock, flags);
2568         desc->istate |= IRQS_NMI;
2569         raw_spin_unlock_irqrestore(&desc->lock, flags);
2570
2571         return 0;
2572
2573 err_irq_setup:
2574         irq_chip_pm_put(&desc->irq_data);
2575 err_out:
2576         kfree(action);
2577
2578         return retval;
2579 }
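
/*
 * Usage sketch (illustrative): per-cpu NMIs are requested once, then each
 * CPU prepares and enables its own copy; teardown is the mirror image.
 * foo_nmi_handler and foo_state are made up for this sketch:
 *
 *	err = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", &foo_state);
 *
 * then, on each CPU, from non-preemptible context:
 *
 *	err = prepare_percpu_nmi(irq);
 *	if (!err)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *
 * and to tear down, per CPU:
 *
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 */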
2580
2581 /**
2582  *      prepare_percpu_nmi - performs CPU local setup for NMI delivery
2583  *      @irq: Interrupt line to prepare for NMI delivery
2584  *
2585  *      This call prepares an interrupt line to deliver NMIs on the current CPU,
2586  *      before that interrupt line gets enabled with enable_percpu_nmi().
2587  *
2588  *      As a CPU local operation, this should be called from non-preemptible
2589  *      context.
2590  *
2591  *      If the interrupt line cannot be used to deliver NMIs, the function
2592  *      will fail, returning a negative value.
2593  */
2594 int prepare_percpu_nmi(unsigned int irq)
2595 {
2596         unsigned long flags;
2597         struct irq_desc *desc;
2598         int ret = 0;
2599
2600         WARN_ON(preemptible());
2601
2602         desc = irq_get_desc_lock(irq, &flags,
2603                                  IRQ_GET_DESC_CHECK_PERCPU);
2604         if (!desc)
2605                 return -EINVAL;
2606
2607         if (WARN(!(desc->istate & IRQS_NMI),
2608                  KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2609                  irq)) {
2610                 ret = -EINVAL;
2611                 goto out;
2612         }
2613
2614         ret = irq_nmi_setup(desc);
2615         if (ret) {
2616                 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2617                 goto out;
2618         }
2619
2620 out:
2621         irq_put_desc_unlock(desc, flags);
2622         return ret;
2623 }
2624
2625 /**
2626  *      teardown_percpu_nmi - undoes NMI setup of IRQ line
2627  *      @irq: Interrupt line from which CPU local NMI configuration should be
2628  *            removed
2629  *
2630  *      This call undoes the setup done by prepare_percpu_nmi().
2631  *
2632  *      The IRQ line should not be enabled for the current CPU.
2633  *
2634  *      As a CPU local operation, this should be called from non-preemptible
2635  *      context.
2636  */
2637 void teardown_percpu_nmi(unsigned int irq)
2638 {
2639         unsigned long flags;
2640         struct irq_desc *desc;
2641
2642         WARN_ON(preemptible());
2643
2644         desc = irq_get_desc_lock(irq, &flags,
2645                                  IRQ_GET_DESC_CHECK_PERCPU);
2646         if (!desc)
2647                 return;
2648
2649         if (WARN_ON(!(desc->istate & IRQS_NMI)))
2650                 goto out;
2651
2652         irq_nmi_teardown(desc);
2653 out:
2654         irq_put_desc_unlock(desc, flags);
2655 }
2656
2657 int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2658                             bool *state)
2659 {
2660         struct irq_chip *chip;
2661         int err = -EINVAL;
2662
2663         do {
2664                 chip = irq_data_get_irq_chip(data);
2665                 if (WARN_ON_ONCE(!chip))
2666                         return -ENODEV;
2667                 if (chip->irq_get_irqchip_state)
2668                         break;
2669 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2670                 data = data->parent_data;
2671 #else
2672                 data = NULL;
2673 #endif
2674         } while (data);
2675
2676         if (data)
2677                 err = chip->irq_get_irqchip_state(data, which, state);
2678         return err;
2679 }
2680
2681 /**
2682  *      irq_get_irqchip_state - returns the irqchip state of an interrupt.
2683  *      @irq: Interrupt line that is forwarded to a VM
2684  *      @which: One of IRQCHIP_STATE_* the caller wants to know about
2685  *      @state: a pointer to a boolean where the state is to be stored
2686  *
2687  *      This call snapshots the internal irqchip state of an
2688  *      interrupt, returning into @state the bit corresponding to
2689  *      state @which.
2690  *
2691  *      This function should be called with preemption disabled if the
2692  *      interrupt controller has per-cpu registers.
2693  */
2694 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2695                           bool *state)
2696 {
2697         struct irq_desc *desc;
2698         struct irq_data *data;
2699         unsigned long flags;
2700         int err = -EINVAL;
2701
2702         desc = irq_get_desc_buslock(irq, &flags, 0);
2703         if (!desc)
2704                 return err;
2705
2706         data = irq_desc_get_irq_data(desc);
2707
2708         err = __irq_get_irqchip_state(data, which, state);
2709
2710         irq_put_desc_busunlock(desc, flags);
2711         return err;
2712 }
2713 EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
2714
2715 /**
2716  *      irq_set_irqchip_state - set the state of a forwarded interrupt.
2717  *      @irq: Interrupt line that is forwarded to a VM
2718  *      @which: State to be restored (one of IRQCHIP_STATE_*)
2719  *      @val: Value corresponding to @which
2720  *
2721  *      This call sets the internal irqchip state of an interrupt,
2722  *      depending on the value of @which.
2723  *
2724  *      This function should be called with migration disabled if the
2725  *      interrupt controller has per-cpu registers.
2726  */
2727 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2728                           bool val)
2729 {
2730         struct irq_desc *desc;
2731         struct irq_data *data;
2732         struct irq_chip *chip;
2733         unsigned long flags;
2734         int err = -EINVAL;
2735
2736         desc = irq_get_desc_buslock(irq, &flags, 0);
2737         if (!desc)
2738                 return err;
2739
2740         data = irq_desc_get_irq_data(desc);
2741
2742         do {
2743                 chip = irq_data_get_irq_chip(data);
2744                 if (WARN_ON_ONCE(!chip)) {
2745                         err = -ENODEV;
2746                         goto out_unlock;
2747                 }
2748                 if (chip->irq_set_irqchip_state)
2749                         break;
2750 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2751                 data = data->parent_data;
2752 #else
2753                 data = NULL;
2754 #endif
2755         } while (data);
2756
2757         if (data)
2758                 err = chip->irq_set_irqchip_state(data, which, val);
2759
2760 out_unlock:
2761         irq_put_desc_busunlock(desc, flags);
2762         return err;
2763 }
2764 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
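
/*
 * Usage sketch (illustrative): a VFIO-style user can snapshot the pending
 * state of an interrupt forwarded to a VM and replay it later. Only the
 * two accessors are real API here; the surrounding flow is an assumption:
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	...
 *	if (!err && pending)
 *		err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
 */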