 	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
 }
 
-#ifdef CONFIG_MMU
-/*
- * For fun, we are using the MMU for this.
- */
-static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
-{
-	struct mm_struct *mm;
-	struct vm_area_struct * vma;
-	unsigned long addr=(unsigned long)buf;
-
-	mm = current->mm;
-	/* Oops, this was forgotten before. -ben */
-	down_read(&mm->mmap_sem);
-
-	/* For private mappings, just map in zero pages. */
-	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
-		unsigned long count;
-
-		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
-			goto out_up;
-		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
-			break;
-		count = vma->vm_end - addr;
-		if (count > size)
-			count = size;
-
-		zap_page_range(vma, addr, count, NULL);
-		if (zeromap_page_range(vma, addr, count, PAGE_COPY))
-			break;
-
-		size -= count;
-		buf += count;
-		addr += count;
-		if (size == 0)
-			goto out_up;
-	}
-
-	up_read(&mm->mmap_sem);
-
-	/* The shared case is hard. Let's do the conventional zeroing. */
-	do {
-		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
-		if (unwritten)
-			return size + unwritten - PAGE_SIZE;
-		cond_resched();
-		buf += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	} while (size);
-
-	return size;
-out_up:
-	up_read(&mm->mmap_sem);
-	return size;
-}
-
 static ssize_t read_zero(struct file * file, char __user * buf,
 			 size_t count, loff_t *ppos)
 {
-	unsigned long left, unwritten, written = 0;
+	size_t written;
 
 	if (!count)
 		return 0;
 
 	if (!access_ok(VERIFY_WRITE, buf, count))
 		return -EFAULT;
 
-	left = count;
-
-	/* do we want to be clever? Arbitrary cut-off */
-	if (count >= PAGE_SIZE*4) {
-		unsigned long partial;
+	written = 0;
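+	/*
+	 * Clear the user buffer in page-sized chunks: a partial
+	 * clear_user() ends the loop and reports a short read, and
+	 * cond_resched() keeps latency down between chunks.
+	 */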
+	while (count) {
+		unsigned long unwritten;
+		size_t chunk = count;
 
-		/* How much left of the page? */
-		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
-		unwritten = clear_user(buf, partial);
-		written = partial - unwritten;
-		if (unwritten)
-			goto out;
-		left -= partial;
-		buf += partial;
-		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
-		written += (left & PAGE_MASK) - unwritten;
+		if (chunk > PAGE_SIZE)
+			chunk = PAGE_SIZE;	/* Just for latency reasons */
+		unwritten = clear_user(buf, chunk);
+		written += chunk - unwritten;
 		if (unwritten)
-			goto out;
-		buf += left & PAGE_MASK;
-		left &= ~PAGE_MASK;
-	}
-	unwritten = clear_user(buf, left);
-	written += left - unwritten;
-out:
-	return written ? written : -EFAULT;
-}
-
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
-{
-	int err;
-
-	if (vma->vm_flags & VM_SHARED)
-		return shmem_zero_setup(vma);
-	err = zeromap_page_range(vma, vma->vm_start,
-			vma->vm_end - vma->vm_start, vma->vm_page_prot);
-	BUG_ON(err == -EEXIST);
-	return err;
-}
-#else /* CONFIG_MMU */
-static ssize_t read_zero(struct file * file, char * buf,
-			 size_t count, loff_t *ppos)
-{
-	size_t todo = count;
-
-	while (todo) {
-		size_t chunk = todo;
-
-		if (chunk > 4096)
-			chunk = 4096;	/* Just for latency reasons */
-		if (clear_user(buf, chunk))
-			return -EFAULT;
+			break;
 		buf += chunk;
-		todo -= chunk;
+		count -= chunk;
 		cond_resched();
 	}
-	return count;
+	return written ? written : -EFAULT;
 }
 
 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
 {
+#ifndef CONFIG_MMU
 	return -ENOSYS;
+#endif
+	if (vma->vm_flags & VM_SHARED)
+		return shmem_zero_setup(vma);
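+	/*
+	 * Private /dev/zero mappings need no special setup: the
+	 * anonymous fault path hands out zeroed pages on demand.
+	 */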
+	return 0;
 }
-#endif /* CONFIG_MMU */
 
 static ssize_t write_full(struct file * file, const char __user * buf,
 			  size_t count, loff_t *ppos)
 	 * has touched so far, we don't want to allocate page tables.
 	 */
 	if (flags & FOLL_ANON) {
-		page = ZERO_PAGE(address);
+		page = ZERO_PAGE(0);
 		if (flags & FOLL_GET)
 			get_page(page);
 		BUG_ON(flags & FOLL_WRITE);
 }
 EXPORT_SYMBOL(get_user_pages);
 
-static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
-			unsigned long addr, unsigned long end, pgprot_t prot)
-{
-	pte_t *pte;
-	spinlock_t *ptl;
-	int err = 0;
-
-	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
-	if (!pte)
-		return -EAGAIN;
-	arch_enter_lazy_mmu_mode();
-	do {
-		struct page *page = ZERO_PAGE(addr);
-		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
-
-		if (unlikely(!pte_none(*pte))) {
-			err = -EEXIST;
-			pte++;
-			break;
-		}
-		page_cache_get(page);
-		page_add_file_rmap(page);
-		inc_mm_counter(mm, file_rss);
-		set_pte_at(mm, addr, pte, zero_pte);
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(pte - 1, ptl);
-	return err;
-}
-
-static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
-			unsigned long addr, unsigned long end, pgprot_t prot)
-{
-	pmd_t *pmd;
-	unsigned long next;
-	int err;
-
-	pmd = pmd_alloc(mm, pud, addr);
-	if (!pmd)
-		return -EAGAIN;
-	do {
-		next = pmd_addr_end(addr, end);
-		err = zeromap_pte_range(mm, pmd, addr, next, prot);
-		if (err)
-			break;
-	} while (pmd++, addr = next, addr != end);
-	return err;
-}
-
-static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
-			unsigned long addr, unsigned long end, pgprot_t prot)
-{
-	pud_t *pud;
-	unsigned long next;
-	int err;
-
-	pud = pud_alloc(mm, pgd, addr);
-	if (!pud)
-		return -EAGAIN;
-	do {
-		next = pud_addr_end(addr, end);
-		err = zeromap_pmd_range(mm, pud, addr, next, prot);
-		if (err)
-			break;
-	} while (pud++, addr = next, addr != end);
-	return err;
-}
-
-int zeromap_page_range(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long size, pgprot_t prot)
-{
-	pgd_t *pgd;
-	unsigned long next;
-	unsigned long end = addr + size;
-	struct mm_struct *mm = vma->vm_mm;
-	int err;
-
-	BUG_ON(addr >= end);
-	pgd = pgd_offset(mm, addr);
-	flush_cache_range(vma, addr, end);
-	do {
-		next = pgd_addr_end(addr, end);
-		err = zeromap_pud_range(mm, pgd, addr, next, prot);
-		if (err)
-			break;
-	} while (pgd++, addr = next, addr != end);
-	return err;
-}
-
 pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
 {
 	pgd_t * pgd = pgd_offset(mm, addr);
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
-	if (old_page == ZERO_PAGE(address)) {
-		new_page = alloc_zeroed_user_highpage_movable(vma, address);
-		if (!new_page)
-			goto oom;
-	} else {
-		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-		if (!new_page)
-			goto oom;
-		cow_user_page(new_page, old_page, address, vma);
-	}
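+	/*
+	 * The kernel no longer maps ZERO_PAGE into user page tables,
+	 * so a write-protect fault should never find it here.
+	 */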
+	VM_BUG_ON(old_page == ZERO_PAGE(0));
+	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+	if (!new_page)
+		goto oom;
+	cow_user_page(new_page, old_page, address, vma);
 
 	/*
 	 * Re-check the pte - we dropped the lock
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (write_access) {
-		/* Allocate our own private page. */
-		pte_unmap(page_table);
-
-		if (unlikely(anon_vma_prepare(vma)))
-			goto oom;
-		page = alloc_zeroed_user_highpage_movable(vma, address);
-		if (!page)
-			goto oom;
-
-		entry = mk_pte(page, vma->vm_page_prot);
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
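+	/*
+	 * Read faults no longer map the global ZERO_PAGE; both read
+	 * and write faults get their own zeroed anonymous page.
+	 */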
+	/* Allocate our own private page. */
+	pte_unmap(page_table);
 
-		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-		if (!pte_none(*page_table))
-			goto release;
-		inc_mm_counter(mm, anon_rss);
-		lru_cache_add_active(page);
-		page_add_new_anon_rmap(page, vma, address);
-	} else {
-		/* Map the ZERO_PAGE - vm_page_prot is readonly */
-		page = ZERO_PAGE(address);
-		page_cache_get(page);
-		entry = mk_pte(page, vma->vm_page_prot);
+	if (unlikely(anon_vma_prepare(vma)))
+		goto oom;
+	page = alloc_zeroed_user_highpage_movable(vma, address);
+	if (!page)
+		goto oom;
 
-		ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
-		if (!pte_none(*page_table))
-			goto release;
-		inc_mm_counter(mm, file_rss);
-		page_add_file_rmap(page);
-	}
+	entry = mk_pte(page, vma->vm_page_prot);
+	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+	if (!pte_none(*page_table))
+		goto release;
+	inc_mm_counter(mm, anon_rss);
+	lru_cache_add_active(page);
+	page_add_new_anon_rmap(page, vma, address);
 
 	set_pte_at(mm, address, page_table, entry);
 
 	/* No need to invalidate - it was non-present before */