[platform/kernel/linux-starfive.git] / mm / highmem.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * High memory handling common code and variables.
4  *
5  * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
6  *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
7  *
8  *
9  * Redesigned the x86 32-bit VM architecture to deal with
10  * 64-bit physical space. With current x86 CPUs this
11  * means up to 64 Gigabytes physical RAM.
12  *
13  * Rewrote high memory support to move the page cache into
14  * high memory. Implemented permanent (schedulable) kmaps
15  * based on Linus' idea.
16  *
17  * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
18  */
19
20 #include <linux/mm.h>
21 #include <linux/export.h>
22 #include <linux/swap.h>
23 #include <linux/bio.h>
24 #include <linux/pagemap.h>
25 #include <linux/mempool.h>
26 #include <linux/init.h>
27 #include <linux/hash.h>
28 #include <linux/highmem.h>
29 #include <linux/kgdb.h>
30 #include <asm/tlbflush.h>
31 #include <linux/vmalloc.h>
32
33 #ifdef CONFIG_KMAP_LOCAL
34 static inline int kmap_local_calc_idx(int idx)
35 {
36         return idx + KM_MAX_IDX * smp_processor_id();
37 }
38
39 #ifndef arch_kmap_local_map_idx
40 #define arch_kmap_local_map_idx(idx, pfn)       kmap_local_calc_idx(idx)
41 #endif
42 #endif /* CONFIG_KMAP_LOCAL */
43
44 /*
45  * pkmap_count is not a pure "count".
46  *  0 means that it is not mapped, and has not been mapped
47  *    since a TLB flush - it is usable.
48  *  1 means that there are no users, but it has been mapped
49  *    since the last TLB flush - so we can't use it.
50  *  n means that there are (n-1) current users of it.
51  */
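/*
 * Worked example of the scheme above (editor's illustrative sketch, not part
 * of the original comment): for one PKMAP slot n,
 *
 *   kmap(page)               -> pkmap_count[n] == 2  (mapped + one user)
 *   kmap(page) again         -> pkmap_count[n] == 3  (two users)
 *   kunmap(page) twice       -> pkmap_count[n] == 1  (mapped, no users, the TLB
 *                                                     may still hold the entry)
 *   flush_all_zero_pkmaps()  -> pkmap_count[n] == 0  (slot reusable)
 */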
52 #ifdef CONFIG_HIGHMEM
53
54 /*
55  * An architecture with an aliasing data cache may define the following family
56  * of helper functions in its asm/highmem.h to control the cache color of the
57  * virtual addresses where physical memory pages are mapped by kmap.
58  */
59 #ifndef get_pkmap_color
60
61 /*
62  * Determine the color of the virtual address where the page should be mapped.
63  */
64 static inline unsigned int get_pkmap_color(struct page *page)
65 {
66         return 0;
67 }
68 #define get_pkmap_color get_pkmap_color
69
70 /*
71  * Get next index for mapping inside PKMAP region for page with given color.
72  */
73 static inline unsigned int get_next_pkmap_nr(unsigned int color)
74 {
75         static unsigned int last_pkmap_nr;
76
77         last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
78         return last_pkmap_nr;
79 }
80
81 /*
82  * Determine if the page index inside the PKMAP region (pkmap_nr) of the given
83  * color has wrapped around the end of the PKMAP region. When this happens, an
84  * attempt is made to flush all unused PKMAP slots.
85  */
86 static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
87 {
88         return pkmap_nr == 0;
89 }
90
91 /*
92  * Get the number of PKMAP entries of the given color. If no free slot is
93  * found after checking that many entries, kmap will sleep waiting for
94  * someone to call kunmap and free a PKMAP slot.
95  */
96 static inline int get_pkmap_entries_count(unsigned int color)
97 {
98         return LAST_PKMAP;
99 }
100
101 /*
102  * Get head of a wait queue for PKMAP entries of the given color.
103  * Wait queues for different mapping colors should be independent to avoid
104  * unnecessary wakeups caused by freeing of slots of other colors.
105  */
106 static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
107 {
108         static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
109
110         return &pkmap_map_wait;
111 }
112 #endif
113
114 atomic_long_t _totalhigh_pages __read_mostly;
115 EXPORT_SYMBOL(_totalhigh_pages);
116
117 unsigned int __nr_free_highpages(void)
118 {
119         struct zone *zone;
120         unsigned int pages = 0;
121
122         for_each_populated_zone(zone) {
123                 if (is_highmem(zone))
124                         pages += zone_page_state(zone, NR_FREE_PAGES);
125         }
126
127         return pages;
128 }
129
130 static int pkmap_count[LAST_PKMAP];
131 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
132
133 pte_t *pkmap_page_table;
134
135 /*
136  * Most architectures have no use for kmap_high_get(), so let's abstract
137  * the IRQ disabling out of the locking in that case to avoid potentially
138  * useless overhead.
139  */
140 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
141 #define lock_kmap()             spin_lock_irq(&kmap_lock)
142 #define unlock_kmap()           spin_unlock_irq(&kmap_lock)
143 #define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
144 #define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
145 #else
146 #define lock_kmap()             spin_lock(&kmap_lock)
147 #define unlock_kmap()           spin_unlock(&kmap_lock)
148 #define lock_kmap_any(flags)    \
149                 do { spin_lock(&kmap_lock); (void)(flags); } while (0)
150 #define unlock_kmap_any(flags)  \
151                 do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
152 #endif
153
154 struct page *__kmap_to_page(void *vaddr)
155 {
156         unsigned long base = (unsigned long) vaddr & PAGE_MASK;
157         struct kmap_ctrl *kctrl = &current->kmap_ctrl;
158         unsigned long addr = (unsigned long)vaddr;
159         int i;
160
161         /* kmap() mappings */
162         if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) &&
163                          addr < PKMAP_ADDR(LAST_PKMAP)))
164                 return pte_page(pkmap_page_table[PKMAP_NR(addr)]);
165
166         /* kmap_local_page() mappings */
167         if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&
168                          base < __fix_to_virt(FIX_KMAP_BEGIN))) {
169                 for (i = 0; i < kctrl->idx; i++) {
170                         unsigned long base_addr;
171                         int idx;
172
173                         idx = arch_kmap_local_map_idx(i, pte_pfn(kctrl->pteval[i]));
174                         base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
175
176                         if (base_addr == base)
177                                 return pte_page(kctrl->pteval[i]);
178                 }
179         }
180
181         return virt_to_page(vaddr);
182 }
183 EXPORT_SYMBOL(__kmap_to_page);
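/*
 * Editor's illustrative sketch (not part of the original file): kmap_to_page()
 * recovers the struct page behind any of the address classes handled above --
 * a permanent kmap(), a kmap_local_page() mapping or a lowmem address. The
 * function name below is hypothetical.
 */
static __maybe_unused void kmap_to_page_sketch(struct page *page)
{
	void *vaddr = kmap_local_page(page);

	/* The round trip yields the page that was mapped. */
	WARN_ON_ONCE(kmap_to_page(vaddr) != page);

	kunmap_local(vaddr);
}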
184
185 static void flush_all_zero_pkmaps(void)
186 {
187         int i;
188         int need_flush = 0;
189
190         flush_cache_kmaps();
191
192         for (i = 0; i < LAST_PKMAP; i++) {
193                 struct page *page;
194
195                 /*
196                  * zero means we don't have anything to do,
197                  * >1 means that it is still in use. Only
198                  * a count of 1 means that it is free but
199                  * needs to be unmapped
200                  */
201                 if (pkmap_count[i] != 1)
202                         continue;
203                 pkmap_count[i] = 0;
204
205                 /* sanity check */
206                 BUG_ON(pte_none(pkmap_page_table[i]));
207
208                 /*
209                  * Don't need an atomic fetch-and-clear op here;
210                  * no-one has the page mapped, and cannot get at
211                  * its virtual address (and hence PTE) without first
212                  * getting the kmap_lock (which is held here).
213                  * So no dangers, even with speculative execution.
214                  */
215                 page = pte_page(pkmap_page_table[i]);
216                 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
217
218                 set_page_address(page, NULL);
219                 need_flush = 1;
220         }
221         if (need_flush)
222                 flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
223 }
224
225 void __kmap_flush_unused(void)
226 {
227         lock_kmap();
228         flush_all_zero_pkmaps();
229         unlock_kmap();
230 }
231
232 static inline unsigned long map_new_virtual(struct page *page)
233 {
234         unsigned long vaddr;
235         int count;
236         unsigned int last_pkmap_nr;
237         unsigned int color = get_pkmap_color(page);
238
239 start:
240         count = get_pkmap_entries_count(color);
241         /* Find an empty entry */
242         for (;;) {
243                 last_pkmap_nr = get_next_pkmap_nr(color);
244                 if (no_more_pkmaps(last_pkmap_nr, color)) {
245                         flush_all_zero_pkmaps();
246                         count = get_pkmap_entries_count(color);
247                 }
248                 if (!pkmap_count[last_pkmap_nr])
249                         break;  /* Found a usable entry */
250                 if (--count)
251                         continue;
252
253                 /*
254                  * Sleep until somebody else unmaps their entries.
255                  */
256                 {
257                         DECLARE_WAITQUEUE(wait, current);
258                         wait_queue_head_t *pkmap_map_wait =
259                                 get_pkmap_wait_queue_head(color);
260
261                         __set_current_state(TASK_UNINTERRUPTIBLE);
262                         add_wait_queue(pkmap_map_wait, &wait);
263                         unlock_kmap();
264                         schedule();
265                         remove_wait_queue(pkmap_map_wait, &wait);
266                         lock_kmap();
267
268                         /* Somebody else might have mapped it while we slept */
269                         if (page_address(page))
270                                 return (unsigned long)page_address(page);
271
272                         /* Re-start */
273                         goto start;
274                 }
275         }
276         vaddr = PKMAP_ADDR(last_pkmap_nr);
277         set_pte_at(&init_mm, vaddr,
278                    &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
279
280         pkmap_count[last_pkmap_nr] = 1;
281         set_page_address(page, (void *)vaddr);
282
283         return vaddr;
284 }
285
286 /**
287  * kmap_high - map a highmem page into memory
288  * @page: &struct page to map
289  *
290  * Returns the page's virtual memory address.
291  *
292  * We cannot call this from interrupts, as it may block.
293  */
294 void *kmap_high(struct page *page)
295 {
296         unsigned long vaddr;
297
298         /*
299          * For highmem pages, we can't trust "virtual" until
300          * after we have the lock.
301          */
302         lock_kmap();
303         vaddr = (unsigned long)page_address(page);
304         if (!vaddr)
305                 vaddr = map_new_virtual(page);
306         pkmap_count[PKMAP_NR(vaddr)]++;
307         BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
308         unlock_kmap();
309         return (void *) vaddr;
310 }
311 EXPORT_SYMBOL(kmap_high);
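/*
 * Editor's illustrative sketch (not part of the original file): the typical
 * sleeping-context pattern behind kmap_high(), via the kmap()/kunmap()
 * wrappers from linux/highmem.h. The function and variable names below are
 * hypothetical.
 */
static __maybe_unused void kmap_high_usage_sketch(void)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
	void *vaddr;

	if (!page)
		return;

	vaddr = kmap(page);		/* may sleep waiting for a free PKMAP slot */
	memset(vaddr, 0, PAGE_SIZE);	/* access the page through the mapping */
	kunmap(page);			/* drops the reference taken by kmap() */

	put_page(page);
}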
312
313 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
314 /**
315  * kmap_high_get - pin a highmem page into memory
316  * @page: &struct page to pin
317  *
318  * Returns the page's current virtual memory address, or NULL if no mapping
319  * exists.  If and only if a non-NULL address is returned then a
320  * matching call to kunmap_high() is necessary.
321  *
322  * This can be called from any context.
323  */
324 void *kmap_high_get(struct page *page)
325 {
326         unsigned long vaddr, flags;
327
328         lock_kmap_any(flags);
329         vaddr = (unsigned long)page_address(page);
330         if (vaddr) {
331                 BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
332                 pkmap_count[PKMAP_NR(vaddr)]++;
333         }
334         unlock_kmap_any(flags);
335         return (void *) vaddr;
336 }
337 #endif
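/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * kmap_high_get() pattern -- operate on a mapping only if one already exists,
 * without ever blocking. Only meaningful where the architecture defines
 * ARCH_NEEDS_KMAP_HIGH_GET. The function name is hypothetical.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
static __maybe_unused void kmap_high_get_sketch(struct page *page)
{
	void *vaddr = kmap_high_get(page);	/* pins the page iff already mapped */

	if (vaddr) {
		memset(vaddr, 0, PAGE_SIZE);	/* use the existing mapping */
		kunmap_high(page);		/* drop the pin taken above */
	}
}
#endif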
338
339 /**
340  * kunmap_high - unmap a highmem page
341  * @page: &struct page to unmap
342  *
343  * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
344  * only from user context.
345  */
346 void kunmap_high(struct page *page)
347 {
348         unsigned long vaddr;
349         unsigned long nr;
350         unsigned long flags;
351         int need_wakeup;
352         unsigned int color = get_pkmap_color(page);
353         wait_queue_head_t *pkmap_map_wait;
354
355         lock_kmap_any(flags);
356         vaddr = (unsigned long)page_address(page);
357         BUG_ON(!vaddr);
358         nr = PKMAP_NR(vaddr);
359
360         /*
361          * A count must never go down to zero
362          * without a TLB flush!
363          */
364         need_wakeup = 0;
365         switch (--pkmap_count[nr]) {
366         case 0:
367                 BUG();
368         case 1:
369                 /*
370                  * Avoid an unnecessary wake_up() call.
371                  * The common case is pkmap_count[] == 1 with
372                  * no waiters.
373                  * The tasks queued in the wait-queue are guarded
374                  * by both the lock in the wait-queue-head and
375                  * the kmap_lock.  As the kmap_lock is held here,
376                  * there is no need for the wait-queue-head's lock.
377                  * Simply test if the queue is empty.
378                  */
379                 pkmap_map_wait = get_pkmap_wait_queue_head(color);
380                 need_wakeup = waitqueue_active(pkmap_map_wait);
381         }
382         unlock_kmap_any(flags);
383
384         /* do wake-up, if needed, race-free outside of the spin lock */
385         if (need_wakeup)
386                 wake_up(pkmap_map_wait);
387 }
388 EXPORT_SYMBOL(kunmap_high);
389
390 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
391                 unsigned start2, unsigned end2)
392 {
393         unsigned int i;
394
395         BUG_ON(end1 > page_size(page) || end2 > page_size(page));
396
397         if (start1 >= end1)
398                 start1 = end1 = 0;
399         if (start2 >= end2)
400                 start2 = end2 = 0;
401
402         for (i = 0; i < compound_nr(page); i++) {
403                 void *kaddr = NULL;
404
405                 if (start1 >= PAGE_SIZE) {
406                         start1 -= PAGE_SIZE;
407                         end1 -= PAGE_SIZE;
408                 } else {
409                         unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);
410
411                         if (end1 > start1) {
412                                 kaddr = kmap_local_page(page + i);
413                                 memset(kaddr + start1, 0, this_end - start1);
414                         }
415                         end1 -= this_end;
416                         start1 = 0;
417                 }
418
419                 if (start2 >= PAGE_SIZE) {
420                         start2 -= PAGE_SIZE;
421                         end2 -= PAGE_SIZE;
422                 } else {
423                         unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);
424
425                         if (end2 > start2) {
426                                 if (!kaddr)
427                                         kaddr = kmap_local_page(page + i);
428                                 memset(kaddr + start2, 0, this_end - start2);
429                         }
430                         end2 -= this_end;
431                         start2 = 0;
432                 }
433
434                 if (kaddr) {
435                         kunmap_local(kaddr);
436                         flush_dcache_page(page + i);
437                 }
438
439                 if (!end1 && !end2)
440                         break;
441         }
442
443         BUG_ON((start1 | start2 | end1 | end2) != 0);
444 }
445 EXPORT_SYMBOL(zero_user_segments);
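/*
 * Editor's illustrative sketch (not part of the original file): zero the head
 * and the tail of a page in one call while leaving the bytes in between
 * untouched. The offsets are hypothetical.
 */
static __maybe_unused void zero_user_segments_sketch(struct page *page)
{
	/* Clear bytes [0, 128) and [512, PAGE_SIZE) of @page. */
	zero_user_segments(page, 0, 128, 512, PAGE_SIZE);
}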
446 #endif /* CONFIG_HIGHMEM */
447
448 #ifdef CONFIG_KMAP_LOCAL
449
450 #include <asm/kmap_size.h>
451
452 /*
453  * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
454  * slot is left unused, acting as a guard page.
455  */
456 #ifdef CONFIG_DEBUG_KMAP_LOCAL
457 # define KM_INCR        2
458 #else
459 # define KM_INCR        1
460 #endif
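/*
 * Editor's note (illustrative, derived from kmap_local_idx_push() below): with
 * KM_INCR == 2 the per-task index advances 0 -> 2 -> 4 ... on push while the
 * returned slot (idx - 1) is 1, 3, 5 ..., so every even slot stays empty and
 * serves as the guard page mentioned above.
 */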
461
462 static inline int kmap_local_idx_push(void)
463 {
464         WARN_ON_ONCE(in_hardirq() && !irqs_disabled());
465         current->kmap_ctrl.idx += KM_INCR;
466         BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
467         return current->kmap_ctrl.idx - 1;
468 }
469
470 static inline int kmap_local_idx(void)
471 {
472         return current->kmap_ctrl.idx - 1;
473 }
474
475 static inline void kmap_local_idx_pop(void)
476 {
477         current->kmap_ctrl.idx -= KM_INCR;
478         BUG_ON(current->kmap_ctrl.idx < 0);
479 }
480
481 #ifndef arch_kmap_local_post_map
482 # define arch_kmap_local_post_map(vaddr, pteval)        do { } while (0)
483 #endif
484
485 #ifndef arch_kmap_local_pre_unmap
486 # define arch_kmap_local_pre_unmap(vaddr)               do { } while (0)
487 #endif
488
489 #ifndef arch_kmap_local_post_unmap
490 # define arch_kmap_local_post_unmap(vaddr)              do { } while (0)
491 #endif
492
493 #ifndef arch_kmap_local_unmap_idx
494 #define arch_kmap_local_unmap_idx(idx, vaddr)   kmap_local_calc_idx(idx)
495 #endif
496
497 #ifndef arch_kmap_local_high_get
498 static inline void *arch_kmap_local_high_get(struct page *page)
499 {
500         return NULL;
501 }
502 #endif
503
504 #ifndef arch_kmap_local_set_pte
505 #define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)  \
506         set_pte_at(mm, vaddr, ptep, ptev)
507 #endif
508
509 /* Unmap a local mapping which was obtained by kmap_high_get() */
510 static inline bool kmap_high_unmap_local(unsigned long vaddr)
511 {
512 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
513         if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
514                 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
515                 return true;
516         }
517 #endif
518         return false;
519 }
520
521 static pte_t *__kmap_pte;
522
523 static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
524 {
525         if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
526                 /*
527                  * Set by the arch if __kmap_pte[-idx] does not produce
528                  * the correct entry.
529                  */
530                 return virt_to_kpte(vaddr);
531         if (!__kmap_pte)
532                 __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
533         return &__kmap_pte[-idx];
534 }
535
536 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
537 {
538         pte_t pteval, *kmap_pte;
539         unsigned long vaddr;
540         int idx;
541
542         /*
543          * Disable migration so resulting virtual address is stable
544          * across preemption.
545          */
546         migrate_disable();
547         preempt_disable();
548         idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
549         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
550         kmap_pte = kmap_get_pte(vaddr, idx);
551         BUG_ON(!pte_none(*kmap_pte));
552         pteval = pfn_pte(pfn, prot);
553         arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
554         arch_kmap_local_post_map(vaddr, pteval);
555         current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
556         preempt_enable();
557
558         return (void *)vaddr;
559 }
560 EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
561
562 void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
563 {
564         void *kmap;
565
566         /*
567          * To broaden the usage of the actual kmap_local() machinery always map
568          * pages when debugging is enabled and the architecture has no problems
569          * with alias mappings.
570          */
571         if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
572                 return page_address(page);
573
574         /* Try kmap_high_get() if architecture has it enabled */
575         kmap = arch_kmap_local_high_get(page);
576         if (kmap)
577                 return kmap;
578
579         return __kmap_local_pfn_prot(page_to_pfn(page), prot);
580 }
581 EXPORT_SYMBOL(__kmap_local_page_prot);
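/*
 * Editor's illustrative sketch (not part of the original file): kmap_local
 * mappings form a per-task stack, so nested kmap_local_page() calls must be
 * undone in reverse (LIFO) order. Names below are hypothetical.
 */
static __maybe_unused void kmap_local_nesting_sketch(struct page *a, struct page *b)
{
	void *va = kmap_local_page(a);
	void *vb = kmap_local_page(b);	/* nested mapping uses the next slot */

	memset(vb, 0, PAGE_SIZE);
	memset(va, 0, PAGE_SIZE);

	kunmap_local(vb);		/* must be released before 'va' */
	kunmap_local(va);
}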
582
583 void kunmap_local_indexed(const void *vaddr)
584 {
585         unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
586         pte_t *kmap_pte;
587         int idx;
588
589         if (addr < __fix_to_virt(FIX_KMAP_END) ||
590             addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
591                 if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
592                         /* This _should_ never happen! See above. */
593                         WARN_ON_ONCE(1);
594                         return;
595                 }
596                 /*
597                  * Handle mappings which were obtained by kmap_high_get()
598                  * first as the virtual address of such mappings is below
599                  * PAGE_OFFSET. Warn for all other addresses which are in
600                  * the user space part of the virtual address space.
601                  */
602                 if (!kmap_high_unmap_local(addr))
603                         WARN_ON_ONCE(addr < PAGE_OFFSET);
604                 return;
605         }
606
607         preempt_disable();
608         idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
609         WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
610
611         kmap_pte = kmap_get_pte(addr, idx);
612         arch_kmap_local_pre_unmap(addr);
613         pte_clear(&init_mm, addr, kmap_pte);
614         arch_kmap_local_post_unmap(addr);
615         current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
616         kmap_local_idx_pop();
617         preempt_enable();
618         migrate_enable();
619 }
620 EXPORT_SYMBOL(kunmap_local_indexed);
621
622 /*
623  * Invoked before switch_to(). This is safe even if an interrupt which
624  * needs a kmap_local mapping happens during or after clearing the maps:
625  * the unmapping code does not modify task::kmap_ctrl.idx, so a nested
626  * kmap_local will use the next unused index and restore that index on
627  * unmap. The already cleared kmaps of the outgoing task are irrelevant
628  * because the interrupt context does not know about them. The same applies
629  * when scheduling back in, for an interrupt which happens before the
630  * restore is complete.
631  */
632 void __kmap_local_sched_out(void)
633 {
634         struct task_struct *tsk = current;
635         pte_t *kmap_pte;
636         int i;
637
638         /* Clear kmaps */
639         for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
640                 pte_t pteval = tsk->kmap_ctrl.pteval[i];
641                 unsigned long addr;
642                 int idx;
643
644                 /* With debug all even slots are unmapped and act as guard */
645                 if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
646                         WARN_ON_ONCE(pte_val(pteval) != 0);
647                         continue;
648                 }
649                 if (WARN_ON_ONCE(pte_none(pteval)))
650                         continue;
651
652                 /*
653                  * This is a horrible hack for XTENSA to calculate the
654                  * coloured PTE index. Uses the PFN encoded into the pteval
655                  * and the map index calculation because the actual mapped
656                  * virtual address is not stored in task::kmap_ctrl.
657                  * For any sane architecture this is optimized out.
658                  */
659                 idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
660
661                 addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
662                 kmap_pte = kmap_get_pte(addr, idx);
663                 arch_kmap_local_pre_unmap(addr);
664                 pte_clear(&init_mm, addr, kmap_pte);
665                 arch_kmap_local_post_unmap(addr);
666         }
667 }
668
669 void __kmap_local_sched_in(void)
670 {
671         struct task_struct *tsk = current;
672         pte_t *kmap_pte;
673         int i;
674
675         /* Restore kmaps */
676         for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
677                 pte_t pteval = tsk->kmap_ctrl.pteval[i];
678                 unsigned long addr;
679                 int idx;
680
681                 /* With debug all even slots are unmapped and act as guard */
682                 if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
683                         WARN_ON_ONCE(pte_val(pteval) != 0);
684                         continue;
685                 }
686                 if (WARN_ON_ONCE(pte_none(pteval)))
687                         continue;
688
689                 /* See comment in __kmap_local_sched_out() */
690                 idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
691                 addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
692                 kmap_pte = kmap_get_pte(addr, idx);
693                 set_pte_at(&init_mm, addr, kmap_pte, pteval);
694                 arch_kmap_local_post_map(addr, pteval);
695         }
696 }
697
698 void kmap_local_fork(struct task_struct *tsk)
699 {
700         if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
701                 memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
702 }
703
704 #endif
705
706 #if defined(HASHED_PAGE_VIRTUAL)
707
708 #define PA_HASH_ORDER   7
709
710 /*
711  * Describes one page->virtual association
712  */
713 struct page_address_map {
714         struct page *page;
715         void *virtual;
716         struct list_head list;
717 };
718
719 static struct page_address_map page_address_maps[LAST_PKMAP];
720
721 /*
722  * Hash table bucket
723  */
724 static struct page_address_slot {
725         struct list_head lh;                    /* List of page_address_maps */
726         spinlock_t lock;                        /* Protect this bucket's list */
727 } ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
728
729 static struct page_address_slot *page_slot(const struct page *page)
730 {
731         return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
732 }
733
734 /**
735  * page_address - get the mapped virtual address of a page
736  * @page: &struct page to get the virtual address of
737  *
738  * Returns the page's virtual address.
739  */
740 void *page_address(const struct page *page)
741 {
742         unsigned long flags;
743         void *ret;
744         struct page_address_slot *pas;
745
746         if (!PageHighMem(page))
747                 return lowmem_page_address(page);
748
749         pas = page_slot(page);
750         ret = NULL;
751         spin_lock_irqsave(&pas->lock, flags);
752         if (!list_empty(&pas->lh)) {
753                 struct page_address_map *pam;
754
755                 list_for_each_entry(pam, &pas->lh, list) {
756                         if (pam->page == page) {
757                                 ret = pam->virtual;
758                                 break;
759                         }
760                 }
761         }
762
763         spin_unlock_irqrestore(&pas->lock, flags);
764         return ret;
765 }
766 EXPORT_SYMBOL(page_address);
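/*
 * Editor's illustrative sketch (not part of the original file): page_address()
 * returns NULL for a highmem page that is not currently mapped, while lowmem
 * pages always resolve to their direct-map address. The function name is
 * hypothetical.
 */
static __maybe_unused void page_address_sketch(struct page *page)
{
	if (!page_address(page)) {
		/* Highmem page with no mapping yet: establish one. */
		void *vaddr = kmap(page);

		memset(vaddr, 0, PAGE_SIZE);
		kunmap(page);
	}
}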
767
768 /**
769  * set_page_address - set a page's virtual address
770  * @page: &struct page to set
771  * @virtual: virtual address to use
772  */
773 void set_page_address(struct page *page, void *virtual)
774 {
775         unsigned long flags;
776         struct page_address_slot *pas;
777         struct page_address_map *pam;
778
779         BUG_ON(!PageHighMem(page));
780
781         pas = page_slot(page);
782         if (virtual) {          /* Add */
783                 pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
784                 pam->page = page;
785                 pam->virtual = virtual;
786
787                 spin_lock_irqsave(&pas->lock, flags);
788                 list_add_tail(&pam->list, &pas->lh);
789                 spin_unlock_irqrestore(&pas->lock, flags);
790         } else {                /* Remove */
791                 spin_lock_irqsave(&pas->lock, flags);
792                 list_for_each_entry(pam, &pas->lh, list) {
793                         if (pam->page == page) {
794                                 list_del(&pam->list);
795                                 break;
796                         }
797                 }
798                 spin_unlock_irqrestore(&pas->lock, flags);
799         }
800
801         return;
802 }
803
804 void __init page_address_init(void)
805 {
806         int i;
807
808         for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
809                 INIT_LIST_HEAD(&page_address_htable[i].lh);
810                 spin_lock_init(&page_address_htable[i].lock);
811         }
812 }
813
814 #endif  /* defined(HASHED_PAGE_VIRTUAL) */