#include <asm/cacheflush.h>
-extern void __kunmap_atomic(void *kvaddr);
-
extern void kmap_init(void);
static inline void flush_cache_kmaps(void)
}
EXPORT_SYMBOL(kmap_atomic_high);
-void __kunmap_atomic(void *kv)
+void kunmap_atomic_high(void *kv)
{
unsigned long kvaddr = (unsigned long)kv;
kmap_atomic_idx_pop();
}
-
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
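The same conversion repeats for every architecture that follows: the arch hook loses its trailing pagefault_enable()/preempt_enable() calls and is renamed from __kunmap_atomic() to kunmap_atomic_high(), because the generic kunmap_atomic() wrapper at the end of this patch now re-enables faults and preemption itself. A minimal sketch of the shape each hook ends up with (illustrative only, not taken from any architecture in this patch):

/*
 * Illustrative sketch, not part of the patch.  After the rename the
 * arch hook only tears down its fixmap mapping; the generic wrapper in
 * include/linux/highmem.h re-enables page faults and preemption.
 */
void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

	if (vaddr < FIXADDR_START)
		return;			/* lowmem page: nothing was mapped */

	/* arch-specific teardown of the fixmap PTE would go here */
	kmap_atomic_idx_pop();		/* release the per-CPU kmap slot */
}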
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
* when CONFIG_HIGHMEM is not set.
*/
#ifdef CONFIG_HIGHMEM
-extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
#endif
}
EXPORT_SYMBOL(kmap_atomic_high);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int idx, type;
/* this address was obtained through kmap_high_get() */
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
void *kmap_atomic_pfn(unsigned long pfn)
{
#define ARCH_HAS_KMAP_FLUSH_TLB
extern void kmap_flush_tlb(unsigned long addr);
-extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);
}
EXPORT_SYMBOL(kmap_atomic_high);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int idx;
if (vaddr < FIXADDR_START)
- goto out;
+ return;
#ifdef CONFIG_DEBUG_HIGHMEM
idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
(void) idx; /* to kill a warning */
#endif
kmap_atomic_idx_pop();
-out:
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
return kmap_atomic_high_prot(page, prot);
}
-extern void __kunmap_atomic(void *kvaddr);
static inline void *kmap_atomic_high(struct page *page)
{
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type;
unsigned int idx;
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- preempt_enable();
+ if (vaddr < __fix_to_virt(FIX_KMAP_END))
return;
- }
type = kmap_atomic_idx();
local_flush_tlb_page(NULL, vaddr);
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
#define ARCH_HAS_KMAP_FLUSH_TLB
extern void kmap_flush_tlb(unsigned long addr);
-extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
#define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
flush_data_cache_page(addr);
if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ kunmap_atomic((void *)addr);
}
EXPORT_SYMBOL(__flush_dcache_page);
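This hunk and the next convert direct callers rather than the arch hook itself: now that kunmap_atomic_high() no longer re-enables page faults and preemption, callers must go through the generic kunmap_atomic() wrapper so the disable/enable pairs stay balanced. A sketch of the pattern (illustrative only, loosely modelled on the MIPS cache-flush paths above and below):

	/*
	 * Illustrative only, not from the patch.  kmap_atomic() disables
	 * preemption and page faults; only kunmap_atomic() re-enables
	 * them, so unmapping must not call the arch hook directly.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);
	flush_data_cache_page(addr);
	if (PageHighMem(page))
		kunmap_atomic((void *)addr);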
flush_data_cache_page(addr);
if (PageHighMem(page))
- __kunmap_atomic((void *)addr);
+ kunmap_atomic((void *)addr);
ClearPageDcacheDirty(page);
}
}
EXPORT_SYMBOL(kmap_atomic_high);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type __maybe_unused;
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
- preempt_enable();
+ if (vaddr < FIXADDR_START)
return;
- }
type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
}
#endif
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* when CONFIG_HIGHMEM is not set.
*/
#ifdef CONFIG_HIGHMEM
-extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
extern struct page *kmap_atomic_to_page(void *ptr);
#endif
}
EXPORT_SYMBOL(kmap_atomic_high);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
if (kvaddr >= (void *)FIXADDR_START) {
unsigned long vaddr = (unsigned long)kvaddr;
ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
set_pte(ptep, 0);
}
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
return page_address(page);
}
-static inline void __kunmap_atomic(void *addr)
+static inline void kunmap_atomic_high(void *addr)
{
flush_kernel_dcache_page_addr(addr);
- pagefault_enable();
- preempt_enable();
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
return kmap_atomic_high_prot(page, prot);
}
-extern void __kunmap_atomic(void *kvaddr);
static inline void *kmap_atomic_high(struct page *page)
{
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- preempt_enable();
+ if (vaddr < __fix_to_virt(FIX_KMAP_END))
return;
- }
if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
int type = kmap_atomic_idx();
}
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
-void __kunmap_atomic(void *kvaddr);
-
#define flush_cache_kmaps() flush_cache_all()
#endif /* __KERNEL__ */
}
EXPORT_SYMBOL(kmap_atomic_high);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type;
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
- preempt_enable();
+ if (vaddr < FIXADDR_START)
return;
- }
type = kmap_atomic_idx();
#endif
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
{
return kmap_atomic_high_prot(page, kmap_prot);
}
-void __kunmap_atomic(void *kvaddr);
void *kmap_atomic_pfn(unsigned long pfn);
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
BUG_ON(vaddr >= (unsigned long)high_memory);
}
#endif
-
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
void __init set_highmem_pages_init(void)
{
flush_cache_all();
}
-void __kunmap_atomic(void *kvaddr);
-
void kmap_init(void);
#endif
}
EXPORT_SYMBOL(kmap_atomic_high);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
if (kvaddr >= (void *)FIXADDR_START &&
kvaddr < (void *)FIXADDR_TOP) {
kmap_atomic_idx_pop();
}
-
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_high(struct page *page);
+extern void kunmap_atomic_high(void *kvaddr);
#include <asm/highmem.h>
#ifndef ARCH_HAS_KMAP_FLUSH_TLB
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
-static inline void __kunmap_atomic(void *addr)
+static inline void kunmap_atomic_high(void *addr)
{
- pagefault_enable();
- preempt_enable();
+ /*
+ * Nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+ * handles re-enabling faults + preemption
+ */
}
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kunmap_atomic(addr) \
do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
- __kunmap_atomic(addr); \
+ kunmap_atomic_high(addr); \
+ pagefault_enable(); \
+ preempt_enable(); \
} while (0)
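Callers are unaffected by the rename since they only ever use the kmap_atomic()/kunmap_atomic() pair; the wrapper above handles the fault/preemption bookkeeping and delegates the per-arch teardown to kunmap_atomic_high(). A minimal usage sketch (hypothetical helper, not part of the patch):

/* Hypothetical example: copy a possibly-highmem page into a buffer. */
static void copy_page_to_buf(struct page *page, void *buf)
{
	void *vaddr;

	vaddr = kmap_atomic(page);	/* disables preemption and page faults */
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);		/* arch teardown, then re-enables both */
}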