diff --git a/include/linux/highmem.h b/include/linux/highmem.h
void kmap_flush_unused(void);
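+/* look up the struct page backing a kmap()'ed or lowmem kernel address */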
+struct page *kmap_to_page(void *addr);
+
#else /* CONFIG_HIGHMEM */
static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline struct page *kmap_to_page(void *addr)
+{
+	return virt_to_page(addr);
+}
+
#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
diff --git a/mm/highmem.c b/mm/highmem.c
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif
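+/*
+ * Map a kmap()'ed virtual address back to its struct page: addresses in
+ * the pkmap window resolve through pkmap_page_table, anything else is
+ * assumed to be a lowmem address that virt_to_page() can handle.
+ */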
+struct page *kmap_to_page(void *vaddr)
+{
+	unsigned long addr = (unsigned long)vaddr;
+
+	/* PKMAP_ADDR(LAST_PKMAP) is the first address past the pkmap window */
+	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
+		int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
+		return pte_page(pkmap_page_table[i]);
+	}
+
+	return virt_to_page(addr);
+}
+
static void flush_all_zero_pkmaps(void)
{
	int i;
diff --git a/mm/page_io.c b/mm/page_io.c
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
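+		/*
+		 * Use kmap() rather than page_address(): a highmem page has
+		 * no permanent kernel mapping, so page_address() can be NULL.
+		 */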
		struct iovec iov = {
-			.iov_base = page_address(page),
+			.iov_base = kmap(page),
			.iov_len  = PAGE_SIZE,
		};
		ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
						&kiocb, &iov,
						kiocb.ki_pos, 1);
+		kunmap(page);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;
-		/* virt_to_page sanity checks the PFN */
-		pages[seg] = virt_to_page(kiov[seg].iov_base);
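+		/* kmap_to_page() also resolves kmap()'ed highmem addresses */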
+		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}