ARM: 7582/2: rename kvm_seq to vmalloc_seq so to avoid confusion with KVM
author    Nicolas Pitre <nicolas.pitre@linaro.org>       Sun, 25 Nov 2012 02:24:32 +0000 (03:24 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>     Mon, 26 Nov 2012 12:23:53 +0000 (12:23 +0000)
The kvm_seq value has nothing whatsoever to do with the KVM
virtualization subsystem. Given that KVM support on ARM is imminent,
it's best to rename kvm_seq to something that clearly identifies what
it is about, i.e. a sequence number for vmalloc section mappings.
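
For reference, the renamed sequence number implements a retry-until-stable
sync of the kernel's vmalloc page table entries into a task's own tables:
a writer bumps the master sequence after changing the master tables, and a
reader copies the entries and retries until it observes a stable sequence.
A minimal standalone sketch of that pattern follows; all names in it
(master_seq, master_tbl, struct ctx, ctx_sync) are hypothetical stand-ins,
not the kernel's own identifiers:

	#include <string.h>

	/* Sketch only: hypothetical names, not the kernel implementation. */
	#define NENTRIES 16

	static unsigned int master_seq;            /* bumped by the writer */
	static unsigned long master_tbl[NENTRIES]; /* stands in for init_mm's
						    * vmalloc PGD range */

	struct ctx {
		unsigned int seq;
		unsigned long tbl[NENTRIES];
	};

	static void ctx_sync(struct ctx *ctx)
	{
		unsigned int seq;

		do {
			seq = master_seq;	/* snapshot before copying */
			/* copy may race with a concurrent writer */
			memcpy(ctx->tbl, master_tbl, sizeof(master_tbl));
			ctx->seq = seq;
		} while (seq != master_seq);	/* writer interfered: retry */
	}

The loop re-reads the sequence after the copy, so a copy that raced with a
writer is simply redone; __check_vmalloc_seq() below uses the same shape.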

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/include/asm/mmu.h
arch/arm/include/asm/mmu_context.h
arch/arm/mm/context.c
arch/arm/mm/ioremap.c

diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 5b53b53..9f77e78 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -7,7 +7,7 @@ typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
        u64 id;
 #endif
-       unsigned int kvm_seq;
+       unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a64f61c..e1f644b 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -20,7 +20,7 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
 
@@ -34,8 +34,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
 {
-       if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-               __check_kvm_seq(mm);
+       if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+               __check_vmalloc_seq(mm);
 
        if (irqs_disabled())
                /*
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a27d73..bc4a5e9 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -186,8 +186,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
        unsigned long flags;
        unsigned int cpu = smp_processor_id();
 
-       if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-               __check_kvm_seq(mm);
+       if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+               __check_vmalloc_seq(mm);
 
        /*
         * Required during context switch to avoid speculative page table
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 5dcc2fd..88fd86c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
-void __check_kvm_seq(struct mm_struct *mm)
+void __check_vmalloc_seq(struct mm_struct *mm)
 {
        unsigned int seq;
 
        do {
-               seq = init_mm.context.kvm_seq;
+               seq = init_mm.context.vmalloc_seq;
                memcpy(pgd_offset(mm, VMALLOC_START),
                       pgd_offset_k(VMALLOC_START),
                       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
                                        pgd_index(VMALLOC_START)));
-               mm->context.kvm_seq = seq;
-       } while (seq != init_mm.context.kvm_seq);
+               mm->context.vmalloc_seq = seq;
+       } while (seq != init_mm.context.vmalloc_seq);
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
                if (!pmd_none(pmd)) {
                        /*
                         * Clear the PMD from the page table, and
-                        * increment the kvm sequence so others
+                        * increment the vmalloc sequence so others
                         * notice this change.
                         *
                         * Note: this is still racy on SMP machines.
                         */
                        pmd_clear(pmdp);
-                       init_mm.context.kvm_seq++;
+                       init_mm.context.vmalloc_seq++;
 
                        /*
                         * Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
         * Ensure that the active_mm is up to date - we want to
         * catch any use-after-iounmap cases.
         */
-       if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-               __check_kvm_seq(current->active_mm);
+       if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
+               __check_vmalloc_seq(current->active_mm);
 
        flush_tlb_kernel_range(virt, end);
 }