1 #include <linux/module.h>
2 #include <linux/preempt.h>
13 static void __rdmsr_on_cpu(void *info)
15 struct msr_info *rv = info;
17 int this_cpu = raw_smp_processor_id();
20 reg = per_cpu_ptr(rv->msrs, this_cpu);
24 rdmsr(rv->msr_no, reg->l, reg->h);
27 static void __wrmsr_on_cpu(void *info)
29 struct msr_info *rv = info;
31 int this_cpu = raw_smp_processor_id();
34 reg = per_cpu_ptr(rv->msrs, this_cpu);
38 wrmsr(rv->msr_no, reg->l, reg->h);
41 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
46 memset(&rv, 0, sizeof(rv));
49 err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
55 EXPORT_SYMBOL(rdmsr_on_cpu);
57 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
62 memset(&rv, 0, sizeof(rv));
67 err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
71 EXPORT_SYMBOL(wrmsr_on_cpu);
73 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
75 void (*msr_func) (void *info))
80 memset(&rv, 0, sizeof(rv));
87 if (cpumask_test_cpu(this_cpu, mask))
90 smp_call_function_many(mask, msr_func, &rv, 1);
94 /* rdmsr on a bunch of CPUs
98 * @msrs: array of MSR values
101 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
103 __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
105 EXPORT_SYMBOL(rdmsr_on_cpus);
108 * wrmsr on a bunch of CPUs
112 * @msrs: array of MSR values
115 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
117 __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
119 EXPORT_SYMBOL(wrmsr_on_cpus);
121 struct msr *msrs_alloc(void)
123 struct msr *msrs = NULL;
125 msrs = alloc_percpu(struct msr);
127 pr_warning("%s: error allocating msrs\n", __func__);
133 EXPORT_SYMBOL(msrs_alloc);
/*
 * msrs_free - release a per-cpu MSR array obtained from msrs_alloc().
 * free_percpu(NULL) is a no-op, so a NULL @msrs is harmless.
 */
void msrs_free(struct msr *msrs)
{
	free_percpu(msrs);
}
EXPORT_SYMBOL(msrs_free);
141 /* These "safe" variants are slower and should be used when the target MSR
142 may not actually exist. */
143 static void __rdmsr_safe_on_cpu(void *info)
145 struct msr_info *rv = info;
147 rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
150 static void __wrmsr_safe_on_cpu(void *info)
152 struct msr_info *rv = info;
154 rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
157 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
162 memset(&rv, 0, sizeof(rv));
165 err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
169 return err ? err : rv.err;
171 EXPORT_SYMBOL(rdmsr_safe_on_cpu);
173 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
178 memset(&rv, 0, sizeof(rv));
183 err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
185 return err ? err : rv.err;
187 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
190 * These variants are significantly slower, but allows control over
191 * the entire 32-bit GPR set.
193 struct msr_regs_info {
198 static void __rdmsr_safe_regs_on_cpu(void *info)
200 struct msr_regs_info *rv = info;
202 rv->err = rdmsr_safe_regs(rv->regs);
205 static void __wrmsr_safe_regs_on_cpu(void *info)
207 struct msr_regs_info *rv = info;
209 rv->err = wrmsr_safe_regs(rv->regs);
212 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
215 struct msr_regs_info rv;
219 err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
221 return err ? err : rv.err;
223 EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
225 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
228 struct msr_regs_info rv;
232 err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
234 return err ? err : rv.err;
236 EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);