#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
 * possibly do the LDT unload here?
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
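/*
 * Called when a kernel thread is scheduled that keeps running on the
 * previous task's mm.  On SMP we only mark this CPU's TLB state as lazy;
 * the flush-IPI path can then use leave_mm() to detach this CPU from the
 * borrowed mm instead of flushing it again and again.
 */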
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
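/*
 * Install a new top-level page table.  Writing %cr3 also flushes all
 * non-global TLB entries, so switch_mm() below needs no separate TLB
 * flush when it changes address spaces.
 */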
static inline void load_cr3(pgd_t *pgd)
{
	asm volatile("movq %0,%%cr3" :: "r" (__pa(pgd)) : "memory");
}
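/*
 * switch_mm() moves this CPU from the previous address space to the next
 * one: it keeps the cpu_vm_mask bitmaps and the per-CPU mmu_state in sync
 * so flush IPIs only go to CPUs that really use the mm, reloads %cr3, and
 * reloads the LDT when the two contexts differ.
 */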
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);
		load_cr3(next->pgd);

		if (unlikely(next->context.ldt != prev->context.ldt))
			load_LDT_nolock(&next->context, cpu);
	}
#ifdef CONFIG_SMP
	else {
		write_pda(mmu_state, TLBSTATE_OK);
		if (read_pda(active_mm) != next)
			out_of_line_bug();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context, cpu);
		}
	}
#endif
}
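/*
 * Illustrative sketch (not part of the original header): roughly how the
 * scheduler core is expected to drive the hooks above during a context
 * switch.  context_switch_sketch() and its locals are simplified
 * assumptions modelled on the generic scheduler, not the real
 * implementation, so the block is compiled out.
 */
#if 0
static void context_switch_sketch(struct task_struct *prev_task,
				  struct task_struct *next_task)
{
	struct mm_struct *next_mm = next_task->mm;
	struct mm_struct *prev_active = prev_task->active_mm;

	if (!next_mm) {
		/* Kernel thread: borrow the previous mm and go lazy. */
		next_task->active_mm = prev_active;
		enter_lazy_tlb(prev_active, next_task);
	} else {
		/* User task: switch page tables (and LDT if it differs). */
		switch_mm(prev_active, next_mm, next_task);
	}
}
#endif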
/* Clear the user segment state (%fs and the gs index) of the old image. */
#define deactivate_mm(tsk,mm)	do { \
	load_gs_index(0); \
	asm volatile("movl %0,%%fs"::"r"(0)); \
} while(0)
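/*
 * activate_mm() installs a brand-new mm for the current task (e.g. from
 * the exec path); on x86-64 it is simply a full switch_mm() with no task
 * argument.
 */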
#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)

#endif