kernel/up.c — uniprocessor (UP) stubs for the SMP cross-call API
[platform/kernel/linux-starfive.git] / kernel / up.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
4  */
5
6 #include <linux/interrupt.h>
7 #include <linux/kernel.h>
8 #include <linux/export.h>
9 #include <linux/smp.h>
10 #include <linux/hypervisor.h>
11
12 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
13                                 int wait)
14 {
15         unsigned long flags;
16
17         if (cpu != 0)
18                 return -ENXIO;
19
20         local_irq_save(flags);
21         func(info);
22         local_irq_restore(flags);
23
24         return 0;
25 }
26 EXPORT_SYMBOL(smp_call_function_single);
27
28 int smp_call_function_single_async(int cpu, call_single_data_t *csd)
29 {
30         unsigned long flags;
31
32         local_irq_save(flags);
33         csd->func(csd->info);
34         local_irq_restore(flags);
35         return 0;
36 }
37 EXPORT_SYMBOL(smp_call_function_single_async);
38
39 void on_each_cpu(smp_call_func_t func, void *info, int wait)
40 {
41         unsigned long flags;
42
43         local_irq_save(flags);
44         func(info);
45         local_irq_restore(flags);
46 }
47 EXPORT_SYMBOL(on_each_cpu);
48
/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
/*
 * Run @func(@info) on each CPU present in @mask.  On UP that means:
 * call it once, locally with IRQs disabled, iff CPU 0 is set in @mask.
 * @wait is unused here — execution is inherently synchronous.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);
67
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
/*
 * Conditionally run @func(@info) on each CPU for which @cond_func(cpu, info)
 * returns true.  On UP only CPU 0 exists, so the predicate is evaluated once
 * for CPU 0 and, if it holds, @func runs locally with IRQs disabled.
 * @mask and @wait are accepted for API parity with SMP but are unused here.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
86
/*
 * Convenience wrapper: same as on_each_cpu_cond_mask() over all CPUs
 * (NULL mask).  The mask is ignored by the UP implementation anyway.
 */
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
}
EXPORT_SYMBOL(on_each_cpu_cond);
93
94 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
95 {
96         int ret;
97
98         if (cpu != 0)
99                 return -ENXIO;
100
101         if (phys)
102                 hypervisor_pin_vcpu(0);
103         ret = func(par);
104         if (phys)
105                 hypervisor_pin_vcpu(-1);
106
107         return ret;
108 }
109 EXPORT_SYMBOL_GPL(smp_call_on_cpu);