x86: don't wank with magical size in __copy_in_user()
... especially since copy_in_user() doesn't.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
parent
3f763453e6
commit
a41e0d7542
|
@ -185,62 +185,8 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
|
|||
static __always_inline __must_check
|
||||
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
might_fault();
|
||||
if (!__builtin_constant_p(size))
|
||||
return copy_user_generic((__force void *)dst,
|
||||
(__force void *)src, size);
|
||||
switch (size) {
|
||||
case 1: {
|
||||
u8 tmp;
|
||||
__uaccess_begin();
|
||||
__get_user_asm(tmp, (u8 __user *)src,
|
||||
ret, "b", "b", "=q", 1);
|
||||
if (likely(!ret))
|
||||
__put_user_asm(tmp, (u8 __user *)dst,
|
||||
ret, "b", "b", "iq", 1);
|
||||
__uaccess_end();
|
||||
return ret;
|
||||
}
|
||||
case 2: {
|
||||
u16 tmp;
|
||||
__uaccess_begin();
|
||||
__get_user_asm(tmp, (u16 __user *)src,
|
||||
ret, "w", "w", "=r", 2);
|
||||
if (likely(!ret))
|
||||
__put_user_asm(tmp, (u16 __user *)dst,
|
||||
ret, "w", "w", "ir", 2);
|
||||
__uaccess_end();
|
||||
return ret;
|
||||
}
|
||||
|
||||
case 4: {
|
||||
u32 tmp;
|
||||
__uaccess_begin();
|
||||
__get_user_asm(tmp, (u32 __user *)src,
|
||||
ret, "l", "k", "=r", 4);
|
||||
if (likely(!ret))
|
||||
__put_user_asm(tmp, (u32 __user *)dst,
|
||||
ret, "l", "k", "ir", 4);
|
||||
__uaccess_end();
|
||||
return ret;
|
||||
}
|
||||
case 8: {
|
||||
u64 tmp;
|
||||
__uaccess_begin();
|
||||
__get_user_asm(tmp, (u64 __user *)src,
|
||||
ret, "q", "", "=r", 8);
|
||||
if (likely(!ret))
|
||||
__put_user_asm(tmp, (u64 __user *)dst,
|
||||
ret, "q", "", "er", 8);
|
||||
__uaccess_end();
|
||||
return ret;
|
||||
}
|
||||
default:
|
||||
return copy_user_generic((__force void *)dst,
|
||||
(__force void *)src, size);
|
||||
}
|
||||
return copy_user_generic((__force void *)dst,
|
||||
(__force void *)src, size);
|
||||
}
|
||||
|
||||
static __must_check __always_inline int
|
||||
|
|
|
@ -54,15 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
|
|||
}
|
||||
EXPORT_SYMBOL(clear_user);
|
||||
|
||||
/*
 * copy_in_user - copy a block between two userspace buffers.
 * @to:   destination user pointer
 * @from: source user pointer
 * @len:  number of bytes to copy
 *
 * Both ranges must pass access_ok(); otherwise nothing is copied and
 * the full @len is reported back as uncopied.
 *
 * Returns the number of bytes that could not be copied (0 on success).
 */
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
	/* bail out early if either range fails the access check */
	if (!access_ok(VERIFY_WRITE, to, len) ||
	    !access_ok(VERIFY_READ, from, len))
		return len;

	return copy_user_generic((__force void *)to, (__force void *)from, len);
}
EXPORT_SYMBOL(copy_in_user);
|
||||
|
||||
/*
|
||||
* Try to copy last bytes and clear the rest if needed.
|
||||
* Since protection fault in copy_from/to_user is not a normal situation,
|
||||
|
|
Loading…
Reference in New Issue