}
static __always_inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n)
+ const void __user *from, unsigned long n, unsigned long total)
{
might_fault();
if (__builtin_constant_p(n)) {
static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
- unsigned long n)
+ unsigned long n, unsigned long total)
{
return __copy_from_user_ll_nocache_nozero(to, from, n);
}
unsigned size, int zerorest);
static inline int __copy_from_user_nocache(void *dst, const void __user *src,
- unsigned size)
+ unsigned size, unsigned long total)
{
might_sleep();
/*
}
static inline int __copy_from_user_inatomic_nocache(void *dst,
- const void __user *src,
- unsigned size)
+ const void __user *src, unsigned size, unsigned total)
{
if (likely(size >= PAGE_SIZE))
return __copy_user_nocache(dst, src, size, 0);
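
For reference, the 64-bit atomic helper ends up looking roughly like this once the hunk is applied. Only the signature change and the two context lines above come from the patch; the small-copy fallback is an assumption modelled on the generic !ARCH_HAS_NOCACHE_UACCESS version further down. Note that the PAGE_SIZE cutoff still tests the per-call size at this stage, the new total argument is only threaded through:

static inline int __copy_from_user_inatomic_nocache(void *dst,
	       const void __user *src, unsigned size, unsigned total)
{
	/* Non-temporal stores only pay off for large copies; in this
	 * patch the cutoff still keys off the per-call 'size', and
	 * 'total' is merely passed down for later use. */
	if (likely(size >= PAGE_SIZE))
		return __copy_user_nocache(dst, src, size, 0);
	/* Assumed small-copy fallback, mirroring the generic variant below. */
	return __copy_from_user_inatomic(dst, src, size);
}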
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
- user_data, length);
+ user_data, length, length);
io_mapping_unmap_atomic(vaddr_atomic);
if (unwritten)
return -EFAULT;
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
- const void __user *from, unsigned long n)
+ const void __user *from, unsigned long n, unsigned long total)
{
return __copy_from_user_inatomic(to, from, n);
}
static inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n)
+ const void __user *from, unsigned long n, unsigned long total)
{
return __copy_from_user(to, from, n);
}
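
The generic fallbacks simply accept and ignore the extra argument. To make the new calling convention concrete, here is a purely hypothetical caller, not part of this patch and with an invented name, that chunks one large write into page-sized copies while passing the size of the whole request as total (kernel context assumed, i.e. linux/uaccess.h and linux/kernel.h):

/* Hypothetical illustration only: copy 'count' bytes of user data into a
 * kernel mapping one page at a time.  Each call copies at most PAGE_SIZE
 * bytes, but the size of the whole write is passed as 'total' so the
 * arch code can see how large the overall request is. */
static unsigned long chunked_copy_nocache(void *dst, const char __user *src,
					  unsigned long count)
{
	unsigned long done = 0;

	while (done < count) {
		unsigned long chunk = min_t(unsigned long,
					    count - done, PAGE_SIZE);
		unsigned long left;

		left = __copy_from_user_nocache(dst + done, src + done,
						chunk, count);
		done += chunk - left;
		if (left)
			break;		/* faulted part-way through */
	}
	return done;			/* bytes actually copied */
}

The point of the extra argument is exactly this split: the per-call n/size can stay small (one page, one iovec segment) while the arch code is still told how big the overall write is.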
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
const struct iovec *iov, size_t base, size_t bytes)
{
- size_t copied = 0, left = 0;
+ size_t copied = 0, left = 0, total = bytes;
while (bytes) {
char __user *buf = iov->iov_base + base;
int copy = min(bytes, iov->iov_len - base);
base = 0;
- left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+ left = __copy_from_user_inatomic_nocache(vaddr, buf, copy, total);
copied += copy;
bytes -= copy;
vaddr += copy;
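
The loop above consumes bytes as it walks the iovec, so the request size has to be captured in total before the first iteration. Assembled, the helper reads roughly as below; the unquoted tail of the loop (the iov++ advance, the early exit on a partial copy, and the copied - left return) is an assumption based on the stock mm/filemap.c of this era, not part of the hunk:

static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0, total = bytes;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		/* 'bytes' shrinks each pass, so the overall request size
		 * is snapshot in 'total' and passed unchanged for every
		 * segment. */
		left = __copy_from_user_inatomic_nocache(vaddr, buf,
							 copy, total);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;			/* assumed: next segment */

		if (unlikely(left))
			break;		/* assumed: stop on partial copy */
	}
	return copied - left;		/* assumed return convention */
}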
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
+
left = __copy_from_user_inatomic_nocache(kaddr + offset,
- buf, bytes);
+ buf, bytes, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
- left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+
+ left = __copy_from_user_nocache(kaddr + offset, buf, bytes, bytes);
copied = bytes - left;
} else {
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
break;
copied = bytes -
- __copy_from_user_nocache(xip_mem + offset, buf, bytes);
+ __copy_from_user_nocache(xip_mem + offset, buf, bytes, bytes);
if (likely(copied > 0)) {
status = copied;