x86, cpu: Rename checking_wrmsrl() to wrmsrl_safe()
author     H. Peter Anvin <hpa@zytor.com>
           Thu, 7 Jun 2012 20:32:04 +0000 (13:32 -0700)
committer  H. Peter Anvin <hpa@zytor.com>
           Thu, 7 Jun 2012 20:32:04 +0000 (13:32 -0700)
Rename checking_wrmsrl() to wrmsrl_safe(), to match the naming
convention used by all the other MSR access functions/macros.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
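
For reference, a sketch of the naming convention the new name joins; these
accessors all exist in <asm/msr.h>, and the one-line summaries here are
paraphrased rather than quoted:

    wrmsr(msr, lo, hi)        /* plain write; #GP-faults on a bad MSR     */
    wrmsr_safe(msr, lo, hi)   /* checked write; returns an error instead  */
    wrmsrl(msr, val)          /* 64-bit write; faults on a bad MSR        */
    wrmsrl_safe(msr, val)     /* 64-bit checked write (the renamed macro) */
    rdmsrl_safe(msr, &val)    /* 64-bit checked read                      */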
arch/x86/include/asm/msr.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/cpu/perf_event_p6.c
arch/x86/kernel/process_64.c
arch/x86/vdso/vdso32-setup.c

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index cb33b5f..fe83d74 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -211,7 +211,7 @@ do {                                                            \
 
 #endif /* !CONFIG_PARAVIRT */
 
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val),                \
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),            \
                                             (u32)((val) >> 32))
 
 #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
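
The macro body is unchanged by the rename: it still splits its 64-bit
argument into the low/high 32-bit halves that wrmsr_safe() takes. A minimal
usage sketch follows; the function name, MSR parameter, and value are
illustrative only:

    static int msr_write_example(u32 msr)
    {
            u64 val = 0x0000000100000002ULL;

            /* expands to wrmsr_safe(msr, 0x00000002, 0x00000001) */
            return wrmsrl_safe(msr, val);   /* 0 on success, error on #GP */
    }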
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c928eb2..9d92e19 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -621,7 +621,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 
                if (!rdmsrl_safe(0xc0011005, &val)) {
                        val |= 1ULL << 54;
-                       checking_wrmsrl(0xc0011005, val);
+                       wrmsrl_safe(0xc0011005, val);
                        rdmsrl(0xc0011005, val);
                        if (val & (1ULL << 54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
@@ -712,7 +712,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
                if (err == 0) {
                        mask |= (1 << 10);
-                       checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+                       wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
                }
        }
 
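Both hunks in this file share a defensive read-modify-write shape; a
condensed sketch, with a hypothetical helper name, mirroring the 0xc0011005
hunk above:

    static bool set_msr_bit_checked(u32 msr, int bit)
    {
            u64 val;

            if (rdmsrl_safe(msr, &val))     /* the MSR may not exist */
                    return false;
            val |= 1ULL << bit;
            wrmsrl_safe(msr, val);
            rdmsrl(msr, val);               /* re-read: confirm the bit stuck */
            return !!(val & (1ULL << bit));
    }
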
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e049d6d..4e3ba9c 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -222,7 +222,7 @@ static bool check_hw_exists(void)
         * that don't trap on the MSR access and always return 0s.
         */
        val = 0xabcdUL;
-       ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+       ret = wrmsrl_safe(x86_pmu_event_addr(0), val);
        ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
        if (ret || val != val_new)
                goto msr_fail;
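
The _safe accessors are what make this probe workable: a hypervisor that
swallows PMU MSR accesses neither faults nor retains the write, so the code
writes a known value and reads it back. A condensed sketch, where pmu_msr
stands in for x86_pmu_event_addr(0):

    static bool pmu_msr_works(u32 pmu_msr)
    {
            u64 val = 0xabcdUL, val_new = 0;
            int ret;

            ret  = wrmsrl_safe(pmu_msr, val);       /* may fail quietly... */
            ret |= rdmsrl_safe(pmu_msr, &val_new);  /* ...so read it back  */
            return ret == 0 && val == val_new;
    }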
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546e..7789aa3 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1003,11 +1003,11 @@ static void intel_pmu_reset(void)
        printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
-               checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
+               wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
+               wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
-               checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+               wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 
        if (ds)
                ds->bts_index = ds->bts_buffer_base;
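
A side note on the 0ull literals above: the macro shifts its argument right
by 32 to form the high half, so callers pass 64-bit values; a bare 32-bit
zero would make that shift undefined in C. This reading is inferred, not
stated in the commit:

    static void clear_msr_example(u32 msr)
    {
            u32 zero32 = 0;

            wrmsrl_safe(msr, 0ull);     /* fine: (u64)0 >> 32 is defined */
            wrmsrl_safe(msr, zero32);   /* risky: a 32-bit value shifted
                                         * by 32 is undefined behaviour  */
    }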
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 47124a7..6c82e40 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
         * So at moment let leave metrics turned on forever -- it's
         * ok for now but need to be revisited!
         *
-        * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
-        * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+        * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
+        * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
         */
 }
 
@@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
         * state we need to clear P4_CCCR_OVF, otherwise interrupt get
         * asserted again and again
         */
-       (void)checking_wrmsrl(hwc->config_base,
+       (void)wrmsrl_safe(hwc->config_base,
                (u64)(p4_config_unpack_cccr(hwc->config)) &
                        ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
@@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config)
 
        bind = &p4_pebs_bind_map[idx];
 
-       (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE,     (u64)bind->metric_pebs);
-       (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT,  (u64)bind->metric_vert);
+       (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+       (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT,      (u64)bind->metric_vert);
 }
 
 static void p4_pmu_enable_event(struct perf_event *event)
@@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event)
         */
        p4_pmu_enable_pebs(hwc->config);
 
-       (void)checking_wrmsrl(escr_addr, escr_conf);
-       (void)checking_wrmsrl(hwc->config_base,
+       (void)wrmsrl_safe(escr_addr, escr_conf);
+       (void)wrmsrl_safe(hwc->config_base,
                                (cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
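Throughout the P4 code the checked write's result is explicitly discarded
with a (void) cast: the write must not fault on a missing MSR, but these
enable/disable paths have nothing useful to do on error. A condensed sketch
of the disable case, with a hypothetical function name:

    static void p4_disable_sketch(struct hw_perf_event *hwc)
    {
            u64 cccr = (u64)p4_config_unpack_cccr(hwc->config);

            /* drop ENABLE and the latched overflow bit; ignore errors */
            (void)wrmsrl_safe(hwc->config_base,
                              cccr & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF &
                              ~P4_CCCR_RESERVED);
    }
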
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 32bcfc7..e4dd0f7 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base, val);
+       (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
        if (cpuc->enabled)
                val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-       (void)checking_wrmsrl(hwc->config_base, val);
+       (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7"    );
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 61cdf7f..3e215ba 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
-                               ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+                               ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
@@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                loadsegment(fs, 0);
-                               ret = checking_wrmsrl(MSR_FS_BASE, addr);
+                               ret = wrmsrl_safe(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
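
Unlike the perf call sites, these propagate the result: a faulting write
returns an error to the arch_prctl() caller. A hedged userspace sketch of
the path the ARCH_SET_GS hunk sits on, using the raw syscall:

    #include <asm/prctl.h>          /* ARCH_SET_GS */
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            /* lands in do_arch_prctl() -> wrmsrl_safe(MSR_KERNEL_GS_BASE, ...) */
            return syscall(SYS_arch_prctl, ARCH_SET_GS, 0x7f1234560000UL);
    }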
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 66e6d93..0faad64 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -205,9 +205,9 @@ void syscall32_cpu_init(void)
 {
        /* Load these always in case some future AMD CPU supports
           SYSENTER from compat mode too. */
-       checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-       checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+       wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 
        wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
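
For reference, the architectural role of the three checked writes, per the
Intel SDM:

    /*
     * MSR_IA32_SYSENTER_CS  (0x174): CS selector loaded by SYSENTER
     * MSR_IA32_SYSENTER_ESP (0x175): stack pointer loaded by SYSENTER
     * MSR_IA32_SYSENTER_EIP (0x176): entry point loaded by SYSENTER
     *
     * SYSENTER takes no operands; the kernel entry context comes
     * entirely from these MSRs, so each CPU initializes them at boot.
     */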