// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>
/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM
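
/*
 * Illustrative lifecycle of one slot i (a sketch derived from the rules
 * above, not code from this file):
 *
 *	pkmap_count[i] == 0	unused and TLB-clean, may be handed out
 *	kmap_high()		sets it to 1, then counts the user: == 2
 *	kunmap_high()		drops the user: == 1 (free but TLB-stale)
 *	flush_all_zero_pkmaps()	flushes the TLB and resets it: == 0
 */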
/*
 * An architecture with an aliasing data cache may define the following
 * family of helper functions in its asm/highmem.h to control the cache
 * color of virtual addresses where physical memory pages are mapped by
 * kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine color of virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color
/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}
/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}
/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}
/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
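
/*
 * A minimal sketch (purely illustrative, not taken from any real
 * architecture) of how an arch with an aliasing data cache could override
 * the default in its asm/highmem.h, keeping mappings on a two-color
 * boundary:
 *
 *	static inline unsigned int get_pkmap_color(struct page *page)
 *	{
 *		return page_to_pfn(page) & 1;
 *	}
 *	#define get_pkmap_color get_pkmap_color
 */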
atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

unsigned int __nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}
static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;
/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the IRQ disabling out of the locking in that case to avoid pointless
 * overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()             spin_lock_irq(&kmap_lock)
#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()             spin_lock(&kmap_lock)
#define unlock_kmap()           spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)    \
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)  \
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif
struct page *__kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);

		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(__kmap_to_page);
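
/*
 * Usage sketch (illustrative only): the kmap_to_page() wrapper around this
 * recovers the struct page behind any kernel virtual address, including
 * PKMAP addresses handed out by kmap():
 *
 *	void *vaddr = kmap(page);
 *	WARN_ON(kmap_to_page(vaddr) != page);
 *	kunmap(page);
 */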
static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}
/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_high);
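
/*
 * Usage sketch (illustrative only): callers normally go through the
 * kmap()/kunmap() wrappers, which forward highmem pages here. The mapping
 * can sleep, so this pattern is valid in process context only:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */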
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif
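
/*
 * Usage sketch (illustrative only; clean_dcache_range() is a hypothetical
 * arch helper, not a real kernel API): cache maintenance code can pin an
 * existing mapping without risking a sleep:
 *
 *	void *vaddr = kmap_high_get(page);
 *	if (vaddr) {
 *		clean_dcache_range(vaddr, vaddr + PAGE_SIZE);
 *		kunmap_high(page);
 *	}
 */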
/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_HIGHMEM the stack depth is doubled and every second
 * slot is left unused and acts as a guard page.
 */
#ifdef CONFIG_DEBUG_HIGHMEM
# define KM_INCR	2
#else
# define KM_INCR	1
#endif
static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}
#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif
/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
		return true;
	}
#endif
	return false;
}
static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}
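
/*
 * Worked example (KM_MAX_IDX is arch-specific; 16 here is just an assumed
 * value): with KM_MAX_IDX == 16, the first local kmap on CPU 0 uses fixmap
 * slot 0 while the first on CPU 2 uses slot 32, so every CPU owns a
 * disjoint window of KM_MAX_IDX slots and needs no cross-CPU
 * synchronization.
 */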
static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(void)
{
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return __kmap_pte;
}
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so the resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	pteval = pfn_pte(pfn, prot);
	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	if (!PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);
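
/*
 * Usage sketch (illustrative only): the kmap_local_page()/kunmap_local()
 * wrappers built on top of this never sleep and are valid in any context,
 * e.g. a page-to-page copy. Local mappings nest and must be released in
 * reverse order:
 *
 *	void *src = kmap_local_page(src_page);
 *	void *dst = kmap_local_page(dst_page);
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap_local(dst);
 *	kunmap_local(src);
 */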
void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte = kmap_get_pte();
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * first as the virtual address of such mappings is below
		 * PAGE_OFFSET. Warn for all other addresses which are in
		 * the user space part of the virtual address space.
		 */
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte - idx);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);
/*
 * Invoked before switch_to(). This is safe even when during or after
 * clearing the maps an interrupt which needs a kmap_local happens because
 * the task::kmap_ctrl.idx is not modified by the unmapping code so a
 * nested kmap_local will use the next unused index and restore the index
 * on unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte - idx);
		arch_kmap_local_post_unmap(addr);
	}
}
void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}
void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif /* CONFIG_KMAP_LOCAL */
#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);
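
/*
 * Usage sketch (illustrative only): page_address() backs kmap_high()'s
 * fast path. For a lowmem page it reduces to address arithmetic on the
 * direct map; for an unmapped highmem page it returns NULL:
 *
 *	void *vaddr = page_address(page);
 *	if (!vaddr)
 *		vaddr = kmap(page);	(must later be kunmap'ed)
 */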
/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}
void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */