uaccess: fix sparse warning on get/put_user for bitwise types
At the moment, if p and x are both tagged as bitwise types, some of
get_user(x, p), put_user(x, p), __get_user(x, p), __put_user(x, p)
might produce a sparse warning on many architectures. This is a false
positive: *p on these architectures is loaded into a long (typically
using asm), then cast back to typeof(*p). When typeof(*p) is a bitwise
type (which is uncommon), such a cast needs __force, otherwise sparse
produces a warning.

Some architectures already have the __force tag; add it where it's
missing.

I verified that adding these __force casts does not suppress any
useful warnings.

Specifically, vhost wants to read/write bitwise types in userspace
memory using get_user/put_user. At the moment this triggers sparse
errors, since the value is passed through an integer. For example:

	__le32 __user *p;
	__u32 x;

both

	put_user(x, p);

and

	get_user(x, p);

should be safe, but produce warnings on some architectures.

While there, I noticed that a bunch of architectures violated coding
style rules within uaccess macros. Included patches to fix them up.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJUtS+YAAoJECgfDbjSjVRpQ/QIAKXOc6tMXo+r/F32YC0Fv74G
W4VKIk7u9XQNjOzez9i+xce75YBDBKHk5R9kLCfAg6Zew+6NRgbBV+QjGVB8dpot
2GxajcVhOySgaR45sGK3Ldg5yVz5ficqZEyYWKNgYeyMWJdlpvUk+4W5q15TiPZe
u+C57/KzfRMDHyv3UkwAbqrkYGE0h7vXBi0BmOdCJlbKjG+6kFoVU/dAWsByDD5p
q54ji8UdIkh2oyH5qhSbAwQN4Cg5N37Agw86HwltjQFJAVvV3yPRUsv7MQnpRB1+
hKlPXPUarNozGVV7OlcvGa9Lvz8m3a2rNd9+1tgHY0Fpia1JYAY2UdubS99fl5E=
=LVcN
-----END PGP SIGNATURE-----

Merge tag 'uaccess_for_upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost into asm-generic

Merge "uaccess: fix sparse warning on get/put_user for bitwise types"
from Michael S. Tsirkin.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>

* tag 'uaccess_for_upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (37 commits)
  sparc32: nocheck uaccess coding style tweaks
  sparc64: nocheck uaccess coding style tweaks
  xtensa: macro whitespace fixes
  sh: macro whitespace fixes
  parisc: macro whitespace fixes
  m68k: macro whitespace fixes
  m32r: macro whitespace fixes
  frv: macro whitespace fixes
  cris: macro whitespace fixes
  avr32: macro whitespace fixes
  arm64: macro whitespace fixes
  arm: macro whitespace fixes
  alpha: macro whitespace fixes
  blackfin: macro whitespace fixes
  sparc64: uaccess_64 macro whitespace fixes
  sparc32: uaccess_32 macro whitespace fixes
  avr32: whitespace fix
  sh: fix put_user sparse errors
  metag: fix put_user sparse errors
  ia64: fix put_user sparse errors
  ...
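The warnings described above come from sparse's __bitwise type checking.
The sketch below is purely illustrative: the demo_* names and the
standalone __demo_force/__demo_bitwise macros are made up for this
example and are not part of the patch set, and the real uaccess macros
use inline asm and access_ok() checks that are omitted here. It shows a
get_user()-style macro that transports the user value through an
unsigned long and casts it back to typeof(*ptr); when that type is
bitwise, such as __le32, sparse accepts the cast back only if it carries
a force annotation, which is exactly what this series adds where it was
missing.

#ifdef __CHECKER__			/* defined while sparse runs */
#define __demo_bitwise	__attribute__((bitwise))
#define __demo_force	__attribute__((force))
#else
#define __demo_bitwise
#define __demo_force
#endif

typedef unsigned int __demo_bitwise demo_le32;

/* Like the arch helpers, move the value through a plain integer and
 * cast it back to the pointer's target type on the way out. */
#define demo_get_user(x, ptr)						\
({									\
	unsigned long __gu_val = (__demo_force unsigned long)*(ptr);	\
	(x) = (__demo_force __typeof__(*(ptr)))__gu_val;		\
	0;								\
})

int demo_read(demo_le32 *p, unsigned int *out)
{
	demo_le32 v;
	int err = demo_get_user(v, p);	/* typeof(*p) is a bitwise type */

	*out = (__demo_force unsigned int)v;
	return err;
}

Removing the __demo_force annotations from the two casts and re-running
sparse reproduces warnings along the lines of "cast to restricted
demo_le32", which is the false positive the series silences.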
commit 643165c8bb
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -27,7 +27,7 @@
 #define get_ds() (KERNEL_DS)
 #define set_fs(x) (current_thread_info()->addr_limit = (x))
 
-#define segment_eq(a,b) ((a).seg == (b).seg)
+#define segment_eq(a, b) ((a).seg == (b).seg)
 
 /*
 * Is a address valid? This does a straightforward calculation rather
@@ -39,13 +39,13 @@
 * - AND "addr+size" doesn't have any high-bits set
 * - OR we are in kernel mode.
 */
-#define __access_ok(addr,size,segment) \
+#define __access_ok(addr, size, segment) \
 (((segment).seg & (addr | size | (addr+size))) == 0)
 
-#define access_ok(type,addr,size) \
+#define access_ok(type, addr, size) \
 ({ \
 __chk_user_ptr(addr); \
-__access_ok(((unsigned long)(addr)),(size),get_fs()); \
+__access_ok(((unsigned long)(addr)), (size), get_fs()); \
 })
 
 /*
@@ -60,20 +60,20 @@
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
-#define put_user(x,ptr) \
-__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
-#define get_user(x,ptr) \
-__get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
+#define put_user(x, ptr) \
+__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
+#define get_user(x, ptr) \
+__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
 
 /*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
-#define __put_user(x,ptr) \
-__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define __get_user(x,ptr) \
-__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
 /*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
@@ -84,7 +84,7 @@
 
 extern void __get_user_unknown(void);
 
-#define __get_user_nocheck(x,ptr,size) \
+#define __get_user_nocheck(x, ptr, size) \
 ({ \
 long __gu_err = 0; \
 unsigned long __gu_val; \
@@ -96,16 +96,16 @@ extern void __get_user_unknown(void);
 case 8: __get_user_64(ptr); break; \
 default: __get_user_unknown(); break; \
 } \
-(x) = (__typeof__(*(ptr))) __gu_val; \
+(x) = (__force __typeof__(*(ptr))) __gu_val; \
 __gu_err; \
 })
 
-#define __get_user_check(x,ptr,size,segment) \
+#define __get_user_check(x, ptr, size, segment) \
 ({ \
 long __gu_err = -EFAULT; \
 unsigned long __gu_val = 0; \
 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
-if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
+if (__access_ok((unsigned long)__gu_addr, size, segment)) { \
 __gu_err = 0; \
 switch (size) { \
 case 1: __get_user_8(__gu_addr); break; \
@@ -115,7 +115,7 @@ extern void __get_user_unknown(void);
 default: __get_user_unknown(); break; \
 } \
 } \
-(x) = (__typeof__(*(ptr))) __gu_val; \
+(x) = (__force __typeof__(*(ptr))) __gu_val; \
 __gu_err; \
 })
 
@@ -201,31 +201,31 @@ struct __large_struct { unsigned long buf[100]; };
 
 extern void __put_user_unknown(void);
 
-#define __put_user_nocheck(x,ptr,size) \
+#define __put_user_nocheck(x, ptr, size) \
 ({ \
 long __pu_err = 0; \
 __chk_user_ptr(ptr); \
 switch (size) { \
-case 1: __put_user_8(x,ptr); break; \
-case 2: __put_user_16(x,ptr); break; \
-case 4: __put_user_32(x,ptr); break; \
-case 8: __put_user_64(x,ptr); break; \
+case 1: __put_user_8(x, ptr); break; \
+case 2: __put_user_16(x, ptr); break; \
+case 4: __put_user_32(x, ptr); break; \
+case 8: __put_user_64(x, ptr); break; \
 default: __put_user_unknown(); break; \
 } \
 __pu_err; \
 })
 
-#define __put_user_check(x,ptr,size,segment) \
+#define __put_user_check(x, ptr, size, segment) \
 ({ \
 long __pu_err = -EFAULT; \
 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
+if (__access_ok((unsigned long)__pu_addr, size, segment)) { \
 __pu_err = 0; \
 switch (size) { \
-case 1: __put_user_8(x,__pu_addr); break; \
-case 2: __put_user_16(x,__pu_addr); break; \
-case 4: __put_user_32(x,__pu_addr); break; \
-case 8: __put_user_64(x,__pu_addr); break; \
+case 1: __put_user_8(x, __pu_addr); break; \
+case 2: __put_user_16(x, __pu_addr); break; \
+case 4: __put_user_32(x, __pu_addr); break; \
+case 8: __put_user_64(x, __pu_addr); break; \
 default: __put_user_unknown(); break; \
 } \
 } \
@@ -237,7 +237,7 @@ extern void __put_user_unknown(void);
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
-#define __put_user_64(x,addr) \
+#define __put_user_64(x, addr) \
 __asm__ __volatile__("1: stq %r2,%1\n" \
 "2:\n" \
 ".section __ex_table,\"a\"\n" \
@@ -247,7 +247,7 @@ __asm__ __volatile__("1: stq %r2,%1\n" \
 : "=r"(__pu_err) \
 : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
 
-#define __put_user_32(x,addr) \
+#define __put_user_32(x, addr) \
 __asm__ __volatile__("1: stl %r2,%1\n" \
 "2:\n" \
 ".section __ex_table,\"a\"\n" \
@@ -260,7 +260,7 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
 #ifdef __alpha_bwx__
 /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
 
-#define __put_user_16(x,addr) \
+#define __put_user_16(x, addr) \
 __asm__ __volatile__("1: stw %r2,%1\n" \
 "2:\n" \
 ".section __ex_table,\"a\"\n" \
@@ -270,7 +270,7 @@ __asm__ __volatile__("1: stw %r2,%1\n" \
 : "=r"(__pu_err) \
 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
 
-#define __put_user_8(x,addr) \
+#define __put_user_8(x, addr) \
 __asm__ __volatile__("1: stb %r2,%1\n" \
 "2:\n" \
 ".section __ex_table,\"a\"\n" \
@@ -283,7 +283,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
 /* Unfortunately, we can't get an unaligned access trap for the sub-word
 write, so we have to do a general unaligned operation. */
 
-#define __put_user_16(x,addr) \
+#define __put_user_16(x, addr) \
 { \
 long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
 __asm__ __volatile__( \
@@ -308,13 +308,13 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
 " .long 4b - .\n" \
 " lda $31, 5b-4b(%0)\n" \
 ".previous" \
-: "=r"(__pu_err), "=&r"(__pu_tmp1), \
-"=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
+: "=r"(__pu_err), "=&r"(__pu_tmp1), \
+"=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
 "=&r"(__pu_tmp4) \
 : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
 }
 
-#define __put_user_8(x,addr) \
+#define __put_user_8(x, addr) \
 { \
 long __pu_tmp1, __pu_tmp2; \
 __asm__ __volatile__( \
@@ -330,7 +330,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
 " .long 2b - .\n" \
 " lda $31, 3b-2b(%0)\n" \
 ".previous" \
-: "=r"(__pu_err), \
+: "=r"(__pu_err), \
 "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
 : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
 }
@@ -366,7 +366,7 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
 : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
 : __module_address(__copy_user)
 "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
-: "$1","$2","$3","$4","$5","$28","memory");
+: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
 
 return __cu_len;
 }
@@ -379,15 +379,15 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
 return len;
 }
 
-#define __copy_to_user(to,from,n) \
+#define __copy_to_user(to, from, n) \
 ({ \
 __chk_user_ptr(to); \
-__copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \
+__copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \
 })
-#define __copy_from_user(to,from,n) \
+#define __copy_from_user(to, from, n) \
 ({ \
 __chk_user_ptr(from); \
-__copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \
+__copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \
 })
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -418,7 +418,7 @@ __clear_user(void __user *to, long len)
 : "=r"(__cl_len), "=r"(__cl_to)
 : __module_address(__do_clear_user)
 "0"(__cl_len), "1"(__cl_to)
-: "$1","$2","$3","$4","$5","$28","memory");
+: "$1", "$2", "$3", "$4", "$5", "$28", "memory");
 return __cl_len;
 }
 
@ -73,7 +73,7 @@ static inline void set_fs(mm_segment_t fs)
|
|||
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
|
||||
}
|
||||
|
||||
#define segment_eq(a,b) ((a) == (b))
|
||||
#define segment_eq(a, b) ((a) == (b))
|
||||
|
||||
#define __addr_ok(addr) ({ \
|
||||
unsigned long flag; \
|
||||
|
@ -84,7 +84,7 @@ static inline void set_fs(mm_segment_t fs)
|
|||
(flag == 0); })
|
||||
|
||||
/* We use 33-bit arithmetic here... */
|
||||
#define __range_ok(addr,size) ({ \
|
||||
#define __range_ok(addr, size) ({ \
|
||||
unsigned long flag, roksum; \
|
||||
__chk_user_ptr(addr); \
|
||||
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
|
||||
|
@ -123,7 +123,7 @@ extern int __get_user_64t_4(void *);
|
|||
#define __GUP_CLOBBER_32t_8 "lr", "cc"
|
||||
#define __GUP_CLOBBER_8 "lr", "cc"
|
||||
|
||||
#define __get_user_x(__r2,__p,__e,__l,__s) \
|
||||
#define __get_user_x(__r2, __p, __e, __l, __s) \
|
||||
__asm__ __volatile__ ( \
|
||||
__asmeq("%0", "r0") __asmeq("%1", "r2") \
|
||||
__asmeq("%3", "r1") \
|
||||
|
@ -134,7 +134,7 @@ extern int __get_user_64t_4(void *);
|
|||
|
||||
/* narrowing a double-word get into a single 32bit word register: */
|
||||
#ifdef __ARMEB__
|
||||
#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
|
||||
#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
|
||||
__get_user_x(__r2, __p, __e, __l, 32t_8)
|
||||
#else
|
||||
#define __get_user_x_32t __get_user_x
|
||||
|
@ -158,7 +158,7 @@ extern int __get_user_64t_4(void *);
|
|||
#endif
|
||||
|
||||
|
||||
#define __get_user_check(x,p) \
|
||||
#define __get_user_check(x, p) \
|
||||
({ \
|
||||
unsigned long __limit = current_thread_info()->addr_limit - 1; \
|
||||
register const typeof(*(p)) __user *__p asm("r0") = (p);\
|
||||
|
@ -196,10 +196,10 @@ extern int __get_user_64t_4(void *);
|
|||
__e; \
|
||||
})
|
||||
|
||||
#define get_user(x,p) \
|
||||
#define get_user(x, p) \
|
||||
({ \
|
||||
might_fault(); \
|
||||
__get_user_check(x,p); \
|
||||
__get_user_check(x, p); \
|
||||
})
|
||||
|
||||
extern int __put_user_1(void *, unsigned int);
|
||||
|
@ -207,7 +207,7 @@ extern int __put_user_2(void *, unsigned int);
|
|||
extern int __put_user_4(void *, unsigned int);
|
||||
extern int __put_user_8(void *, unsigned long long);
|
||||
|
||||
#define __put_user_x(__r2,__p,__e,__l,__s) \
|
||||
#define __put_user_x(__r2, __p, __e, __l, __s) \
|
||||
__asm__ __volatile__ ( \
|
||||
__asmeq("%0", "r0") __asmeq("%2", "r2") \
|
||||
__asmeq("%3", "r1") \
|
||||
|
@ -216,7 +216,7 @@ extern int __put_user_8(void *, unsigned long long);
|
|||
: "0" (__p), "r" (__r2), "r" (__l) \
|
||||
: "ip", "lr", "cc")
|
||||
|
||||
#define __put_user_check(x,p) \
|
||||
#define __put_user_check(x, p) \
|
||||
({ \
|
||||
unsigned long __limit = current_thread_info()->addr_limit - 1; \
|
||||
const typeof(*(p)) __user *__tmp_p = (p); \
|
||||
|
@ -242,10 +242,10 @@ extern int __put_user_8(void *, unsigned long long);
|
|||
__e; \
|
||||
})
|
||||
|
||||
#define put_user(x,p) \
|
||||
#define put_user(x, p) \
|
||||
({ \
|
||||
might_fault(); \
|
||||
__put_user_check(x,p); \
|
||||
__put_user_check(x, p); \
|
||||
})
|
||||
|
||||
#else /* CONFIG_MMU */
|
||||
|
@ -255,21 +255,21 @@ extern int __put_user_8(void *, unsigned long long);
|
|||
*/
|
||||
#define USER_DS KERNEL_DS
|
||||
|
||||
#define segment_eq(a,b) (1)
|
||||
#define __addr_ok(addr) ((void)(addr),1)
|
||||
#define __range_ok(addr,size) ((void)(addr),0)
|
||||
#define segment_eq(a, b) (1)
|
||||
#define __addr_ok(addr) ((void)(addr), 1)
|
||||
#define __range_ok(addr, size) ((void)(addr), 0)
|
||||
#define get_fs() (KERNEL_DS)
|
||||
|
||||
static inline void set_fs(mm_segment_t fs)
|
||||
{
|
||||
}
|
||||
|
||||
#define get_user(x,p) __get_user(x,p)
|
||||
#define put_user(x,p) __put_user(x,p)
|
||||
#define get_user(x, p) __get_user(x, p)
|
||||
#define put_user(x, p) __put_user(x, p)
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
|
||||
#define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
|
||||
|
||||
#define user_addr_max() \
|
||||
(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
|
||||
|
@ -283,35 +283,35 @@ static inline void set_fs(mm_segment_t fs)
|
|||
* error occurs, and leave it unchanged on success. Note that these
|
||||
* versions are void (ie, don't return a value as such).
|
||||
*/
|
||||
#define __get_user(x,ptr) \
|
||||
#define __get_user(x, ptr) \
|
||||
({ \
|
||||
long __gu_err = 0; \
|
||||
__get_user_err((x),(ptr),__gu_err); \
|
||||
__get_user_err((x), (ptr), __gu_err); \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
#define __get_user_error(x,ptr,err) \
|
||||
#define __get_user_error(x, ptr, err) \
|
||||
({ \
|
||||
__get_user_err((x),(ptr),err); \
|
||||
__get_user_err((x), (ptr), err); \
|
||||
(void) 0; \
|
||||
})
|
||||
|
||||
#define __get_user_err(x,ptr,err) \
|
||||
#define __get_user_err(x, ptr, err) \
|
||||
do { \
|
||||
unsigned long __gu_addr = (unsigned long)(ptr); \
|
||||
unsigned long __gu_val; \
|
||||
__chk_user_ptr(ptr); \
|
||||
might_fault(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
|
||||
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
|
||||
case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \
|
||||
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
|
||||
case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
|
||||
case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
|
||||
default: (__gu_val) = __get_user_bad(); \
|
||||
} \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
} while (0)
|
||||
|
||||
#define __get_user_asm_byte(x,addr,err) \
|
||||
#define __get_user_asm_byte(x, addr, err) \
|
||||
__asm__ __volatile__( \
|
||||
"1: " TUSER(ldrb) " %1,[%2],#0\n" \
|
||||
"2:\n" \
|
||||
|
@ -330,7 +330,7 @@ do { \
|
|||
: "cc")
|
||||
|
||||
#ifndef __ARMEB__
|
||||
#define __get_user_asm_half(x,__gu_addr,err) \
|
||||
#define __get_user_asm_half(x, __gu_addr, err) \
|
||||
({ \
|
||||
unsigned long __b1, __b2; \
|
||||
__get_user_asm_byte(__b1, __gu_addr, err); \
|
||||
|
@ -338,7 +338,7 @@ do { \
|
|||
(x) = __b1 | (__b2 << 8); \
|
||||
})
|
||||
#else
|
||||
#define __get_user_asm_half(x,__gu_addr,err) \
|
||||
#define __get_user_asm_half(x, __gu_addr, err) \
|
||||
({ \
|
||||
unsigned long __b1, __b2; \
|
||||
__get_user_asm_byte(__b1, __gu_addr, err); \
|
||||
|
@ -347,7 +347,7 @@ do { \
|
|||
})
|
||||
#endif
|
||||
|
||||
#define __get_user_asm_word(x,addr,err) \
|
||||
#define __get_user_asm_word(x, addr, err) \
|
||||
__asm__ __volatile__( \
|
||||
"1: " TUSER(ldr) " %1,[%2],#0\n" \
|
||||
"2:\n" \
|
||||
|
@ -365,35 +365,35 @@ do { \
|
|||
: "r" (addr), "i" (-EFAULT) \
|
||||
: "cc")
|
||||
|
||||
#define __put_user(x,ptr) \
|
||||
#define __put_user(x, ptr) \
|
||||
({ \
|
||||
long __pu_err = 0; \
|
||||
__put_user_err((x),(ptr),__pu_err); \
|
||||
__put_user_err((x), (ptr), __pu_err); \
|
||||
__pu_err; \
|
||||
})
|
||||
|
||||
#define __put_user_error(x,ptr,err) \
|
||||
#define __put_user_error(x, ptr, err) \
|
||||
({ \
|
||||
__put_user_err((x),(ptr),err); \
|
||||
__put_user_err((x), (ptr), err); \
|
||||
(void) 0; \
|
||||
})
|
||||
|
||||
#define __put_user_err(x,ptr,err) \
|
||||
#define __put_user_err(x, ptr, err) \
|
||||
do { \
|
||||
unsigned long __pu_addr = (unsigned long)(ptr); \
|
||||
__typeof__(*(ptr)) __pu_val = (x); \
|
||||
__chk_user_ptr(ptr); \
|
||||
might_fault(); \
|
||||
switch (sizeof(*(ptr))) { \
|
||||
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
|
||||
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
|
||||
case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \
|
||||
case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \
|
||||
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
|
||||
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
|
||||
case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \
|
||||
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
|
||||
default: __put_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define __put_user_asm_byte(x,__pu_addr,err) \
|
||||
#define __put_user_asm_byte(x, __pu_addr, err) \
|
||||
__asm__ __volatile__( \
|
||||
"1: " TUSER(strb) " %1,[%2],#0\n" \
|
||||
"2:\n" \
|
||||
|
@ -411,22 +411,22 @@ do { \
|
|||
: "cc")
|
||||
|
||||
#ifndef __ARMEB__
|
||||
#define __put_user_asm_half(x,__pu_addr,err) \
|
||||
#define __put_user_asm_half(x, __pu_addr, err) \
|
||||
({ \
|
||||
unsigned long __temp = (unsigned long)(x); \
|
||||
unsigned long __temp = (__force unsigned long)(x); \
|
||||
__put_user_asm_byte(__temp, __pu_addr, err); \
|
||||
__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
|
||||
})
|
||||
#else
|
||||
#define __put_user_asm_half(x,__pu_addr,err) \
|
||||
#define __put_user_asm_half(x, __pu_addr, err) \
|
||||
({ \
|
||||
unsigned long __temp = (unsigned long)(x); \
|
||||
unsigned long __temp = (__force unsigned long)(x); \
|
||||
__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
|
||||
__put_user_asm_byte(__temp, __pu_addr + 1, err); \
|
||||
})
|
||||
#endif
|
||||
|
||||
#define __put_user_asm_word(x,__pu_addr,err) \
|
||||
#define __put_user_asm_word(x, __pu_addr, err) \
|
||||
__asm__ __volatile__( \
|
||||
"1: " TUSER(str) " %1,[%2],#0\n" \
|
||||
"2:\n" \
|
||||
|
@ -451,7 +451,7 @@ do { \
|
|||
#define __reg_oper1 "%R2"
|
||||
#endif
|
||||
|
||||
#define __put_user_asm_dword(x,__pu_addr,err) \
|
||||
#define __put_user_asm_dword(x, __pu_addr, err) \
|
||||
__asm__ __volatile__( \
|
||||
ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
|
||||
ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
|
||||
|
@ -480,9 +480,9 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void
|
|||
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
|
||||
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
|
||||
#else
|
||||
#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
|
||||
#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
|
||||
#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
|
||||
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
|
||||
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
|
||||
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
|
||||
#endif
|
||||
|
||||
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
|
||||
|
|
|
@ -63,7 +63,7 @@ static inline void set_fs(mm_segment_t fs)
|
|||
current_thread_info()->addr_limit = fs;
|
||||
}
|
||||
|
||||
#define segment_eq(a,b) ((a) == (b))
|
||||
#define segment_eq(a, b) ((a) == (b))
|
||||
|
||||
/*
|
||||
* Return 1 if addr < current->addr_limit, 0 otherwise.
|
||||
|
@ -147,7 +147,7 @@ do { \
|
|||
default: \
|
||||
BUILD_BUG(); \
|
||||
} \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
} while (0)
|
||||
|
||||
#define __get_user(x, ptr) \
|
||||
|
|
|
@ -26,7 +26,7 @@ typedef struct {
|
|||
* For historical reasons (Data Segment Register?), these macros are misnamed.
|
||||
*/
|
||||
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
|
||||
#define segment_eq(a,b) ((a).is_user_space == (b).is_user_space)
|
||||
#define segment_eq(a, b) ((a).is_user_space == (b).is_user_space)
|
||||
|
||||
#define USER_ADDR_LIMIT 0x80000000
|
||||
|
||||
|
@ -108,8 +108,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
|
|||
*
|
||||
* Returns zero on success, or -EFAULT on error.
|
||||
*/
|
||||
#define put_user(x,ptr) \
|
||||
__put_user_check((x),(ptr),sizeof(*(ptr)))
|
||||
#define put_user(x, ptr) \
|
||||
__put_user_check((x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
/*
|
||||
* get_user: - Get a simple variable from user space.
|
||||
|
@ -128,8 +128,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
|
|||
* Returns zero on success, or -EFAULT on error.
|
||||
* On error, the variable @x is set to zero.
|
||||
*/
|
||||
#define get_user(x,ptr) \
|
||||
__get_user_check((x),(ptr),sizeof(*(ptr)))
|
||||
#define get_user(x, ptr) \
|
||||
__get_user_check((x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
/*
|
||||
* __put_user: - Write a simple value into user space, with less checking.
|
||||
|
@ -150,8 +150,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
|
|||
*
|
||||
* Returns zero on success, or -EFAULT on error.
|
||||
*/
|
||||
#define __put_user(x,ptr) \
|
||||
__put_user_nocheck((x),(ptr),sizeof(*(ptr)))
|
||||
#define __put_user(x, ptr) \
|
||||
__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
/*
|
||||
* __get_user: - Get a simple variable from user space, with less checking.
|
||||
|
@ -173,8 +173,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
|
|||
* Returns zero on success, or -EFAULT on error.
|
||||
* On error, the variable @x is set to zero.
|
||||
*/
|
||||
#define __get_user(x,ptr) \
|
||||
__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
|
||||
#define __get_user(x, ptr) \
|
||||
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
extern int __get_user_bad(void);
|
||||
extern int __put_user_bad(void);
|
||||
|
@ -191,7 +191,7 @@ extern int __put_user_bad(void);
|
|||
default: __gu_err = __get_user_bad(); break; \
|
||||
} \
|
||||
\
|
||||
x = (typeof(*(ptr)))__gu_val; \
|
||||
x = (__force typeof(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
|
@ -222,7 +222,7 @@ extern int __put_user_bad(void);
|
|||
} else { \
|
||||
__gu_err = -EFAULT; \
|
||||
} \
|
||||
x = (typeof(*(ptr)))__gu_val; \
|
||||
x = (__force typeof(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
|
@ -278,7 +278,7 @@ extern int __put_user_bad(void);
|
|||
__pu_err); \
|
||||
break; \
|
||||
case 8: \
|
||||
__put_user_asm("d", __pu_addr, __pu_val, \
|
||||
__put_user_asm("d", __pu_addr, __pu_val, \
|
||||
__pu_err); \
|
||||
break; \
|
||||
default: \
|
||||
|
|
|
@ -27,7 +27,7 @@ static inline void set_fs(mm_segment_t fs)
|
|||
current_thread_info()->addr_limit = fs;
|
||||
}
|
||||
|
||||
#define segment_eq(a,b) ((a) == (b))
|
||||
#define segment_eq(a, b) ((a) == (b))
|
||||
|
||||
#define VERIFY_READ 0
|
||||
#define VERIFY_WRITE 1
|
||||
|
@ -68,11 +68,11 @@ struct exception_table_entry {
|
|||
* use the right size if we just have the right pointer type.
|
||||
*/
|
||||
|
||||
#define put_user(x,p) \
|
||||
#define put_user(x, p) \
|
||||
({ \
|
||||
int _err = 0; \
|
||||
typeof(*(p)) _x = (x); \
|
||||
typeof(*(p)) __user *_p = (p); \
|
||||
typeof(*(p)) __user *_p = (p); \
|
||||
if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\
|
||||
_err = -EFAULT; \
|
||||
} \
|
||||
|
@ -89,10 +89,10 @@ struct exception_table_entry {
|
|||
break; \
|
||||
case 8: { \
|
||||
long _xl, _xh; \
|
||||
_xl = ((long *)&_x)[0]; \
|
||||
_xh = ((long *)&_x)[1]; \
|
||||
__put_user_asm(_xl, ((long __user *)_p)+0, ); \
|
||||
__put_user_asm(_xh, ((long __user *)_p)+1, ); \
|
||||
_xl = ((__force long *)&_x)[0]; \
|
||||
_xh = ((__force long *)&_x)[1]; \
|
||||
__put_user_asm(_xl, ((__force long __user *)_p)+0, );\
|
||||
__put_user_asm(_xh, ((__force long __user *)_p)+1, );\
|
||||
} break; \
|
||||
default: \
|
||||
_err = __put_user_bad(); \
|
||||
|
@ -102,7 +102,7 @@ struct exception_table_entry {
|
|||
_err; \
|
||||
})
|
||||
|
||||
#define __put_user(x,p) put_user(x,p)
|
||||
#define __put_user(x, p) put_user(x, p)
|
||||
static inline int bad_user_access_length(void)
|
||||
{
|
||||
panic("bad_user_access_length");
|
||||
|
@ -121,10 +121,10 @@ static inline int bad_user_access_length(void)
|
|||
|
||||
#define __ptr(x) ((unsigned long __force *)(x))
|
||||
|
||||
#define __put_user_asm(x,p,bhw) \
|
||||
#define __put_user_asm(x, p, bhw) \
|
||||
__asm__ (#bhw"[%1] = %0;\n\t" \
|
||||
: /* no outputs */ \
|
||||
:"d" (x),"a" (__ptr(p)) : "memory")
|
||||
:"d" (x), "a" (__ptr(p)) : "memory")
|
||||
|
||||
#define get_user(x, ptr) \
|
||||
({ \
|
||||
|
@ -136,10 +136,10 @@ static inline int bad_user_access_length(void)
|
|||
BUILD_BUG_ON(ptr_size >= 8); \
|
||||
switch (ptr_size) { \
|
||||
case 1: \
|
||||
__get_user_asm(_val, _p, B,(Z)); \
|
||||
__get_user_asm(_val, _p, B, (Z)); \
|
||||
break; \
|
||||
case 2: \
|
||||
__get_user_asm(_val, _p, W,(Z)); \
|
||||
__get_user_asm(_val, _p, W, (Z)); \
|
||||
break; \
|
||||
case 4: \
|
||||
__get_user_asm(_val, _p, , ); \
|
||||
|
@ -147,11 +147,11 @@ static inline int bad_user_access_length(void)
|
|||
} \
|
||||
} else \
|
||||
_err = -EFAULT; \
|
||||
x = (typeof(*(ptr)))_val; \
|
||||
x = (__force typeof(*(ptr)))_val; \
|
||||
_err; \
|
||||
})
|
||||
|
||||
#define __get_user(x,p) get_user(x,p)
|
||||
#define __get_user(x, p) get_user(x, p)
|
||||
|
||||
#define __get_user_bad() (bad_user_access_length(), (-EFAULT))
|
||||
|
||||
|
@ -168,10 +168,10 @@ static inline int bad_user_access_length(void)
|
|||
#define __copy_to_user_inatomic __copy_to_user
|
||||
#define __copy_from_user_inatomic __copy_from_user
|
||||
|
||||
#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n))\
|
||||
#define copy_to_user_ret(to, from, n, retval) ({ if (copy_to_user(to, from, n))\
|
||||
return retval; })
|
||||
|
||||
#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\
|
||||
#define copy_from_user_ret(to, from, n, retval) ({ if (copy_from_user(to, from, n))\
|
||||
return retval; })
|
||||
|
||||
static inline unsigned long __must_check
|
||||
|
|
|
@ -47,12 +47,13 @@
|
|||
#define get_fs() (current_thread_info()->addr_limit)
|
||||
#define set_fs(x) (current_thread_info()->addr_limit = (x))
|
||||
|
||||
#define segment_eq(a,b) ((a).seg == (b).seg)
|
||||
#define segment_eq(a, b) ((a).seg == (b).seg)
|
||||
|
||||
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
|
||||
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
|
||||
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
|
||||
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
|
||||
#define __user_ok(addr, size) \
|
||||
(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
|
||||
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
|
||||
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
|
||||
|
||||
#include <arch/uaccess.h>
|
||||
|
||||
|
@ -92,56 +93,56 @@ struct exception_table_entry
|
|||
* CRIS, we can just do these as direct assignments. (Of course, the
|
||||
* exception handling means that it's no longer "just"...)
|
||||
*/
|
||||
#define get_user(x,ptr) \
|
||||
__get_user_check((x),(ptr),sizeof(*(ptr)))
|
||||
#define put_user(x,ptr) \
|
||||
__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
|
||||
#define get_user(x, ptr) \
|
||||
__get_user_check((x), (ptr), sizeof(*(ptr)))
|
||||
#define put_user(x, ptr) \
|
||||
__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
#define __get_user(x,ptr) \
|
||||
__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
|
||||
#define __put_user(x,ptr) \
|
||||
__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
|
||||
#define __get_user(x, ptr) \
|
||||
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
|
||||
#define __put_user(x, ptr) \
|
||||
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
extern long __put_user_bad(void);
|
||||
|
||||
#define __put_user_size(x,ptr,size,retval) \
|
||||
do { \
|
||||
retval = 0; \
|
||||
switch (size) { \
|
||||
case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \
|
||||
case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \
|
||||
case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \
|
||||
case 8: __put_user_asm_64(x,ptr,retval); break; \
|
||||
default: __put_user_bad(); \
|
||||
} \
|
||||
#define __put_user_size(x, ptr, size, retval) \
|
||||
do { \
|
||||
retval = 0; \
|
||||
switch (size) { \
|
||||
case 1: __put_user_asm(x, ptr, retval, "move.b"); break; \
|
||||
case 2: __put_user_asm(x, ptr, retval, "move.w"); break; \
|
||||
case 4: __put_user_asm(x, ptr, retval, "move.d"); break; \
|
||||
case 8: __put_user_asm_64(x, ptr, retval); break; \
|
||||
default: __put_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define __get_user_size(x,ptr,size,retval) \
|
||||
do { \
|
||||
retval = 0; \
|
||||
switch (size) { \
|
||||
case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \
|
||||
case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \
|
||||
case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \
|
||||
case 8: __get_user_asm_64(x,ptr,retval); break; \
|
||||
default: (x) = __get_user_bad(); \
|
||||
} \
|
||||
#define __get_user_size(x, ptr, size, retval) \
|
||||
do { \
|
||||
retval = 0; \
|
||||
switch (size) { \
|
||||
case 1: __get_user_asm(x, ptr, retval, "move.b"); break; \
|
||||
case 2: __get_user_asm(x, ptr, retval, "move.w"); break; \
|
||||
case 4: __get_user_asm(x, ptr, retval, "move.d"); break; \
|
||||
case 8: __get_user_asm_64(x, ptr, retval); break; \
|
||||
default: (x) = __get_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define __put_user_nocheck(x,ptr,size) \
|
||||
#define __put_user_nocheck(x, ptr, size) \
|
||||
({ \
|
||||
long __pu_err; \
|
||||
__put_user_size((x),(ptr),(size),__pu_err); \
|
||||
__put_user_size((x), (ptr), (size), __pu_err); \
|
||||
__pu_err; \
|
||||
})
|
||||
|
||||
#define __put_user_check(x,ptr,size) \
|
||||
({ \
|
||||
long __pu_err = -EFAULT; \
|
||||
__typeof__(*(ptr)) *__pu_addr = (ptr); \
|
||||
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
|
||||
__put_user_size((x),__pu_addr,(size),__pu_err); \
|
||||
__pu_err; \
|
||||
#define __put_user_check(x, ptr, size) \
|
||||
({ \
|
||||
long __pu_err = -EFAULT; \
|
||||
__typeof__(*(ptr)) *__pu_addr = (ptr); \
|
||||
if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
|
||||
__put_user_size((x), __pu_addr, (size), __pu_err); \
|
||||
__pu_err; \
|
||||
})
|
||||
|
||||
struct __large_struct { unsigned long buf[100]; };
|
||||
|
@ -149,21 +150,21 @@ struct __large_struct { unsigned long buf[100]; };
|
|||
|
||||
|
||||
|
||||
#define __get_user_nocheck(x,ptr,size) \
|
||||
#define __get_user_nocheck(x, ptr, size) \
|
||||
({ \
|
||||
long __gu_err, __gu_val; \
|
||||
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
#define __get_user_check(x,ptr,size) \
|
||||
#define __get_user_check(x, ptr, size) \
|
||||
({ \
|
||||
long __gu_err = -EFAULT, __gu_val = 0; \
|
||||
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
|
||||
if (access_ok(VERIFY_READ,__gu_addr,size)) \
|
||||
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
if (access_ok(VERIFY_READ, __gu_addr, size)) \
|
||||
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
|
@ -180,7 +181,7 @@ static inline unsigned long
|
|||
__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
|
||||
{
|
||||
if (access_ok(VERIFY_WRITE, to, n))
|
||||
return __copy_user(to,from,n);
|
||||
return __copy_user(to, from, n);
|
||||
return n;
|
||||
}
|
||||
|
||||
|
@ -188,7 +189,7 @@ static inline unsigned long
|
|||
__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
|
||||
{
|
||||
if (access_ok(VERIFY_READ, from, n))
|
||||
return __copy_user_zeroing(to,from,n);
|
||||
return __copy_user_zeroing(to, from, n);
|
||||
return n;
|
||||
}
|
||||
|
||||
|
@ -196,7 +197,7 @@ static inline unsigned long
|
|||
__generic_clear_user(void __user *to, unsigned long n)
|
||||
{
|
||||
if (access_ok(VERIFY_WRITE, to, n))
|
||||
return __do_clear_user(to,n);
|
||||
return __do_clear_user(to, n);
|
||||
return n;
|
||||
}
|
||||
|
||||
|
@ -373,29 +374,31 @@ static inline unsigned long
|
|||
__generic_copy_from_user_nocheck(void *to, const void __user *from,
|
||||
unsigned long n)
|
||||
{
|
||||
return __copy_user_zeroing(to,from,n);
|
||||
return __copy_user_zeroing(to, from, n);
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
__generic_copy_to_user_nocheck(void __user *to, const void *from,
|
||||
unsigned long n)
|
||||
{
|
||||
return __copy_user(to,from,n);
|
||||
return __copy_user(to, from, n);
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
__generic_clear_user_nocheck(void __user *to, unsigned long n)
|
||||
{
|
||||
return __do_clear_user(to,n);
|
||||
return __do_clear_user(to, n);
|
||||
}
|
||||
|
||||
/* without checking */
|
||||
|
||||
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
|
||||
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
|
||||
#define __copy_to_user(to, from, n) \
|
||||
__generic_copy_to_user_nocheck((to), (from), (n))
|
||||
#define __copy_from_user(to, from, n) \
|
||||
__generic_copy_from_user_nocheck((to), (from), (n))
|
||||
#define __copy_to_user_inatomic __copy_to_user
|
||||
#define __copy_from_user_inatomic __copy_from_user
|
||||
#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))
|
||||
#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))
|
||||
|
||||
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ typedef struct {
|
|||
|
||||
#define get_ds() (KERNEL_DS)
|
||||
#define get_fs() (__current_thread_info->addr_limit)
|
||||
#define segment_eq(a,b) ((a).seg == (b).seg)
|
||||
#define segment_eq(a, b) ((a).seg == (b).seg)
|
||||
#define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS)
|
||||
#define get_addr_limit() (get_fs().seg)
|
||||
|
||||
|
|
|
@ -169,10 +169,11 @@ do { \
|
|||
(err) = ia64_getreg(_IA64_REG_R8); \
|
||||
(val) = ia64_getreg(_IA64_REG_R9); \
|
||||
} while (0)
|
||||
# define __put_user_size(val, addr, n, err) \
|
||||
do { \
|
||||
__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \
|
||||
(err) = ia64_getreg(_IA64_REG_R8); \
|
||||
# define __put_user_size(val, addr, n, err) \
|
||||
do { \
|
||||
__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \
|
||||
(__force unsigned long) (val)); \
|
||||
(err) = ia64_getreg(_IA64_REG_R8); \
|
||||
} while (0)
|
||||
#endif /* !ASM_SUPPORTED */
|
||||
|
||||
|
@ -197,7 +198,7 @@ extern void __get_user_unknown (void);
|
|||
case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
|
||||
default: __get_user_unknown(); break; \
|
||||
} \
|
||||
(x) = (__typeof__(*(__gu_ptr))) __gu_val; \
|
||||
(x) = (__force __typeof__(*(__gu_ptr))) __gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ static inline void set_fs(mm_segment_t s)
|
|||
|
||||
#endif /* not CONFIG_MMU */
|
||||
|
||||
#define segment_eq(a,b) ((a).seg == (b).seg)
|
||||
#define segment_eq(a, b) ((a).seg == (b).seg)
|
||||
|
||||
#define __addr_ok(addr) \
|
||||
((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
|
||||
|
@ -68,7 +68,7 @@ static inline void set_fs(mm_segment_t s)
|
|||
*
|
||||
* This needs 33-bit arithmetic. We have a carry...
|
||||
*/
|
||||
#define __range_ok(addr,size) ({ \
|
||||
#define __range_ok(addr, size) ({ \
|
||||
unsigned long flag, roksum; \
|
||||
__chk_user_ptr(addr); \
|
||||
asm ( \
|
||||
|
@ -103,7 +103,7 @@ static inline void set_fs(mm_segment_t s)
|
|||
* this function, memory access functions may still return -EFAULT.
|
||||
*/
|
||||
#ifdef CONFIG_MMU
|
||||
#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
|
||||
#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
|
||||
#else
|
||||
static inline int access_ok(int type, const void *addr, unsigned long size)
|
||||
{
|
||||
|
@ -167,8 +167,8 @@ extern int fixup_exception(struct pt_regs *regs);
|
|||
* Returns zero on success, or -EFAULT on error.
|
||||
* On error, the variable @x is set to zero.
|
||||
*/
|
||||
#define get_user(x,ptr) \
|
||||
__get_user_check((x),(ptr),sizeof(*(ptr)))
|
||||
#define get_user(x, ptr) \
|
||||
__get_user_check((x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
/**
|
||||
* put_user: - Write a simple value into user space.
|
||||
|
@ -186,8 +186,8 @@ extern int fixup_exception(struct pt_regs *regs);
|
|||
*
|
||||
* Returns zero on success, or -EFAULT on error.
|
||||
*/
|
||||
#define put_user(x,ptr) \
|
||||
__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
|
||||
#define put_user(x, ptr) \
|
||||
__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
/**
|
||||
* __get_user: - Get a simple variable from user space, with less checking.
|
||||
|
@ -209,41 +209,41 @@ extern int fixup_exception(struct pt_regs *regs);
|
|||
* Returns zero on success, or -EFAULT on error.
|
||||
* On error, the variable @x is set to zero.
|
||||
*/
|
||||
#define __get_user(x,ptr) \
|
||||
__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
|
||||
#define __get_user(x, ptr) \
|
||||
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
#define __get_user_nocheck(x,ptr,size) \
|
||||
#define __get_user_nocheck(x, ptr, size) \
|
||||
({ \
|
||||
long __gu_err = 0; \
|
||||
unsigned long __gu_val; \
|
||||
might_fault(); \
|
||||
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
#define __get_user_check(x,ptr,size) \
|
||||
#define __get_user_check(x, ptr, size) \
|
||||
({ \
|
||||
long __gu_err = -EFAULT; \
|
||||
unsigned long __gu_val = 0; \
|
||||
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
|
||||
might_fault(); \
|
||||
if (access_ok(VERIFY_READ,__gu_addr,size)) \
|
||||
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
if (access_ok(VERIFY_READ, __gu_addr, size)) \
|
||||
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
extern long __get_user_bad(void);
|
||||
|
||||
#define __get_user_size(x,ptr,size,retval) \
|
||||
#define __get_user_size(x, ptr, size, retval) \
|
||||
do { \
|
||||
retval = 0; \
|
||||
__chk_user_ptr(ptr); \
|
||||
switch (size) { \
|
||||
case 1: __get_user_asm(x,ptr,retval,"ub"); break; \
|
||||
case 2: __get_user_asm(x,ptr,retval,"uh"); break; \
|
||||
case 4: __get_user_asm(x,ptr,retval,""); break; \
|
||||
case 1: __get_user_asm(x, ptr, retval, "ub"); break; \
|
||||
case 2: __get_user_asm(x, ptr, retval, "uh"); break; \
|
||||
case 4: __get_user_asm(x, ptr, retval, ""); break; \
|
||||
default: (x) = __get_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
@ -288,26 +288,26 @@ do { \
|
|||
*
|
||||
* Returns zero on success, or -EFAULT on error.
|
||||
*/
|
||||
#define __put_user(x,ptr) \
|
||||
__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
|
||||
#define __put_user(x, ptr) \
|
||||
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
|
||||
|
||||
|
||||
#define __put_user_nocheck(x,ptr,size) \
|
||||
#define __put_user_nocheck(x, ptr, size) \
|
||||
({ \
|
||||
long __pu_err; \
|
||||
might_fault(); \
|
||||
__put_user_size((x),(ptr),(size),__pu_err); \
|
||||
__put_user_size((x), (ptr), (size), __pu_err); \
|
||||
__pu_err; \
|
||||
})
|
||||
|
||||
|
||||
#define __put_user_check(x,ptr,size) \
|
||||
#define __put_user_check(x, ptr, size) \
|
||||
({ \
|
||||
long __pu_err = -EFAULT; \
|
||||
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
|
||||
might_fault(); \
|
||||
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
|
||||
__put_user_size((x),__pu_addr,(size),__pu_err); \
|
||||
if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
|
||||
__put_user_size((x), __pu_addr, (size), __pu_err); \
|
||||
__pu_err; \
|
||||
})
|
||||
|
||||
|
@ -366,15 +366,15 @@ do { \
|
|||
|
||||
extern void __put_user_bad(void);
|
||||
|
||||
#define __put_user_size(x,ptr,size,retval) \
|
||||
#define __put_user_size(x, ptr, size, retval) \
|
||||
do { \
|
||||
retval = 0; \
|
||||
__chk_user_ptr(ptr); \
|
||||
switch (size) { \
|
||||
case 1: __put_user_asm(x,ptr,retval,"b"); break; \
|
||||
case 2: __put_user_asm(x,ptr,retval,"h"); break; \
|
||||
case 4: __put_user_asm(x,ptr,retval,""); break; \
|
||||
case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
|
||||
case 1: __put_user_asm(x, ptr, retval, "b"); break; \
|
||||
case 2: __put_user_asm(x, ptr, retval, "h"); break; \
|
||||
case 4: __put_user_asm(x, ptr, retval, ""); break; \
|
||||
case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\
|
||||
default: __put_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
@ -421,7 +421,7 @@ struct __large_struct { unsigned long buf[100]; };
|
|||
|
||||
/* Generic arbitrary sized copy. */
|
||||
/* Return the number of bytes NOT copied. */
|
||||
#define __copy_user(to,from,size) \
|
||||
#define __copy_user(to, from, size) \
|
||||
do { \
|
||||
unsigned long __dst, __src, __c; \
|
||||
__asm__ __volatile__ ( \
|
||||
|
@ -478,7 +478,7 @@ do { \
|
|||
: "r14", "memory"); \
|
||||
} while (0)
|
||||
|
||||
#define __copy_user_zeroing(to,from,size) \
|
||||
#define __copy_user_zeroing(to, from, size) \
|
||||
do { \
|
||||
unsigned long __dst, __src, __c; \
|
||||
__asm__ __volatile__ ( \
|
||||
|
@ -548,14 +548,14 @@ do { \
|
|||
static inline unsigned long __generic_copy_from_user_nocheck(void *to,
|
||||
const void __user *from, unsigned long n)
|
||||
{
|
||||
__copy_user_zeroing(to,from,n);
|
||||
__copy_user_zeroing(to, from, n);
|
||||
return n;
|
||||
}
|
||||
|
||||
static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
|
||||
const void *from, unsigned long n)
|
||||
{
|
||||
__copy_user(to,from,n);
|
||||
__copy_user(to, from, n);
|
||||
return n;
|
||||
}
|
||||
|
||||
|
@ -576,8 +576,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
|
|||
* Returns number of bytes that could not be copied.
|
||||
* On success, this will be zero.
|
||||
*/
|
||||
#define __copy_to_user(to,from,n) \
|
||||
__generic_copy_to_user_nocheck((to),(from),(n))
|
||||
#define __copy_to_user(to, from, n) \
|
||||
__generic_copy_to_user_nocheck((to), (from), (n))
|
||||
|
||||
#define __copy_to_user_inatomic __copy_to_user
|
||||
#define __copy_from_user_inatomic __copy_from_user
|
||||
|
@ -595,10 +595,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
|
|||
* Returns number of bytes that could not be copied.
|
||||
* On success, this will be zero.
|
||||
*/
|
||||
#define copy_to_user(to,from,n) \
|
||||
#define copy_to_user(to, from, n) \
|
||||
({ \
|
||||
might_fault(); \
|
||||
__generic_copy_to_user((to),(from),(n)); \
|
||||
__generic_copy_to_user((to), (from), (n)); \
|
||||
})
|
||||
|
||||
/**
|
||||
|
@ -617,8 +617,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
|
|||
* If some data could not be copied, this function will pad the copied
|
||||
* data to the requested size using zero bytes.
|
||||
*/
|
||||
#define __copy_from_user(to,from,n) \
|
||||
__generic_copy_from_user_nocheck((to),(from),(n))
|
||||
#define __copy_from_user(to, from, n) \
|
||||
__generic_copy_from_user_nocheck((to), (from), (n))
|
||||
|
||||
/**
|
||||
* copy_from_user: - Copy a block of data from user space.
|
||||
|
@ -636,10 +636,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
|
|||
* If some data could not be copied, this function will pad the copied
|
||||
* data to the requested size using zero bytes.
|
||||
*/
|
||||
#define copy_from_user(to,from,n) \
|
||||
#define copy_from_user(to, from, n) \
|
||||
({ \
|
||||
might_fault(); \
|
||||
__generic_copy_from_user((to),(from),(n)); \
|
||||
__generic_copy_from_user((to), (from), (n)); \
|
||||
})
|
||||
|
||||
long __must_check strncpy_from_user(char *dst, const char __user *src,
|
||||
|
|
|
@ -58,7 +58,7 @@ static inline mm_segment_t get_ds(void)
|
|||
#define set_fs(x) (current_thread_info()->addr_limit = (x))
|
||||
#endif
|
||||
|
||||
#define segment_eq(a,b) ((a).seg == (b).seg)
|
||||
#define segment_eq(a, b) ((a).seg == (b).seg)
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
|
|
@ -128,25 +128,25 @@ asm volatile ("\n" \
|
|||
#define put_user(x, ptr) __put_user(x, ptr)
|
||||
|
||||
|
||||
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
|
||||
type __gu_val; \
|
||||
asm volatile ("\n" \
|
||||
"1: "MOVES"."#bwl" %2,%1\n" \
|
||||
"2:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" .even\n" \
|
||||
"10: move.l %3,%0\n" \
|
||||
" sub.l %1,%1\n" \
|
||||
" jra 2b\n" \
|
||||
" .previous\n" \
|
||||
"\n" \
|
||||
" .section __ex_table,\"a\"\n" \
|
||||
" .align 4\n" \
|
||||
" .long 1b,10b\n" \
|
||||
" .previous" \
|
||||
: "+d" (res), "=&" #reg (__gu_val) \
|
||||
: "m" (*(ptr)), "i" (err)); \
|
||||
(x) = (typeof(*(ptr)))(unsigned long)__gu_val; \
|
||||
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
|
||||
type __gu_val; \
|
||||
asm volatile ("\n" \
|
||||
"1: "MOVES"."#bwl" %2,%1\n" \
|
||||
"2:\n" \
|
||||
" .section .fixup,\"ax\"\n" \
|
||||
" .even\n" \
|
||||
"10: move.l %3,%0\n" \
|
||||
" sub.l %1,%1\n" \
|
||||
" jra 2b\n" \
|
||||
" .previous\n" \
|
||||
"\n" \
|
||||
" .section __ex_table,\"a\"\n" \
|
||||
" .align 4\n" \
|
||||
" .long 1b,10b\n" \
|
||||
" .previous" \
|
||||
: "+d" (res), "=&" #reg (__gu_val) \
|
||||
: "m" (*(ptr)), "i" (err)); \
|
||||
(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
|
||||
})
|
||||
|
||||
#define __get_user(x, ptr) \
|
||||
|
@ -188,7 +188,7 @@ asm volatile ("\n" \
|
|||
"+a" (__gu_ptr) \
|
||||
: "i" (-EFAULT) \
|
||||
: "memory"); \
|
||||
(x) = (typeof(*(ptr)))__gu_val; \
|
||||
(x) = (__force typeof(*(ptr)))__gu_val; \
|
||||
break; \
|
||||
} */ \
|
||||
default: \
|
||||
|
|
|
@ -107,18 +107,23 @@ extern long __put_user_asm_w(unsigned int x, void __user *addr);
|
|||
extern long __put_user_asm_d(unsigned int x, void __user *addr);
|
||||
extern long __put_user_asm_l(unsigned long long x, void __user *addr);
|
||||
|
||||
#define __put_user_size(x, ptr, size, retval) \
|
||||
do { \
|
||||
retval = 0; \
|
||||
switch (size) { \
|
||||
#define __put_user_size(x, ptr, size, retval) \
|
||||
do { \
|
||||
retval = 0; \
|
||||
switch (size) { \
|
||||
case 1: \
|
||||
retval = __put_user_asm_b((unsigned int)x, ptr); break; \
|
||||
retval = __put_user_asm_b((__force unsigned int)x, ptr);\
|
||||
break; \
|
||||
case 2: \
|
||||
retval = __put_user_asm_w((unsigned int)x, ptr); break; \
|
||||
retval = __put_user_asm_w((__force unsigned int)x, ptr);\
|
||||
break; \
|
||||
case 4: \
|
||||
retval = __put_user_asm_d((unsigned int)x, ptr); break; \
|
||||
retval = __put_user_asm_d((__force unsigned int)x, ptr);\
|
||||
break; \
|
||||
case 8: \
|
||||
retval = __put_user_asm_l((unsigned long long)x, ptr); break; \
|
||||
retval = __put_user_asm_l((__force unsigned long long)x,\
|
||||
ptr); \
|
||||
break; \
|
||||
default: \
|
||||
__put_user_bad(); \
|
||||
} \
|
||||
|
@ -135,7 +140,7 @@ extern long __get_user_bad(void);
|
|||
({ \
|
||||
long __gu_err, __gu_val; \
|
||||
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
|
@ -145,7 +150,7 @@ extern long __get_user_bad(void);
|
|||
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
|
||||
if (access_ok(VERIFY_READ, __gu_addr, size)) \
|
||||
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
|
|
|
@ -192,7 +192,7 @@ struct __large_struct {
|
|||
({ \
|
||||
long __gu_err, __gu_val; \
|
||||
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
|
@ -202,7 +202,7 @@ struct __large_struct {
|
|||
const __typeof__(*(ptr)) * __gu_addr = (ptr); \
|
||||
if (access_ok(VERIFY_READ, __gu_addr, size)) \
|
||||
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
|
||||
(x) = (__typeof__(*(ptr)))__gu_val; \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
__gu_err; \
|
||||
})
|
||||
|
||||
|
|
|
@@ -17,7 +17,7 @@
#define KERNEL_DS ((mm_segment_t){0})
#define USER_DS ((mm_segment_t){1})

#define segment_eq(a,b) ((a).seg == (b).seg)
#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)

@@ -42,14 +42,14 @@ static inline long access_ok(int type, const void __user * addr,
#if !defined(CONFIG_64BIT)
#define LDD_KERNEL(ptr) BUILD_BUG()
#define LDD_USER(ptr) BUILD_BUG()
#define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr)
#define STD_USER(x, ptr) __put_user_asm64(x,ptr)
#define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr)
#define STD_USER(x, ptr) __put_user_asm64(x, ptr)
#define ASM_WORD_INSN ".word\t"
#else
#define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr)
#define LDD_USER(ptr) __get_user_asm("ldd",ptr)
#define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr)
#define STD_USER(x, ptr) __put_user_asm("std",x,ptr)
#define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr)
#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
#define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr)
#define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
#define ASM_WORD_INSN ".dword\t"
#endif

@@ -80,68 +80,68 @@ struct exception_data {
unsigned long fault_addr;
};

#define __get_user(x,ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
register long __gu_val __asm__ ("r9") = 0; \
\
if (segment_eq(get_fs(),KERNEL_DS)) { \
switch (sizeof(*(ptr))) { \
case 1: __get_kernel_asm("ldb",ptr); break; \
case 2: __get_kernel_asm("ldh",ptr); break; \
case 4: __get_kernel_asm("ldw",ptr); break; \
case 8: LDD_KERNEL(ptr); break; \
default: BUILD_BUG(); break; \
} \
} \
else { \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm("ldb",ptr); break; \
case 2: __get_user_asm("ldh",ptr); break; \
case 4: __get_user_asm("ldw",ptr); break; \
case 8: LDD_USER(ptr); break; \
default: BUILD_BUG(); break; \
} \
} \
\
(x) = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
#define __get_user(x, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
register long __gu_val __asm__ ("r9") = 0; \
\
if (segment_eq(get_fs(), KERNEL_DS)) { \
switch (sizeof(*(ptr))) { \
case 1: __get_kernel_asm("ldb", ptr); break; \
case 2: __get_kernel_asm("ldh", ptr); break; \
case 4: __get_kernel_asm("ldw", ptr); break; \
case 8: LDD_KERNEL(ptr); break; \
default: BUILD_BUG(); break; \
} \
} \
else { \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm("ldb", ptr); break; \
case 2: __get_user_asm("ldh", ptr); break; \
case 4: __get_user_asm("ldw", ptr); break; \
case 8: LDD_USER(ptr); break; \
default: BUILD_BUG(); break; \
} \
} \
\
(x) = (__force __typeof__(*(ptr))) __gu_val; \
__gu_err; \
})

#define __get_kernel_asm(ldx,ptr) \
#define __get_kernel_asm(ldx, ptr) \
__asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");

#define __get_user_asm(ldx,ptr) \
#define __get_user_asm(ldx, ptr) \
__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err) \
: "r1");

#define __put_user(x,ptr) \
#define __put_user(x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
if (segment_eq(get_fs(),KERNEL_DS)) { \
if (segment_eq(get_fs(), KERNEL_DS)) { \
switch (sizeof(*(ptr))) { \
case 1: __put_kernel_asm("stb",__x,ptr); break; \
case 2: __put_kernel_asm("sth",__x,ptr); break; \
case 4: __put_kernel_asm("stw",__x,ptr); break; \
case 8: STD_KERNEL(__x,ptr); break; \
case 1: __put_kernel_asm("stb", __x, ptr); break; \
case 2: __put_kernel_asm("sth", __x, ptr); break; \
case 4: __put_kernel_asm("stw", __x, ptr); break; \
case 8: STD_KERNEL(__x, ptr); break; \
default: BUILD_BUG(); break; \
} \
} \
else { \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm("stb",__x,ptr); break; \
case 2: __put_user_asm("sth",__x,ptr); break; \
case 4: __put_user_asm("stw",__x,ptr); break; \
case 8: STD_USER(__x,ptr); break; \
case 1: __put_user_asm("stb", __x, ptr); break; \
case 2: __put_user_asm("sth", __x, ptr); break; \
case 4: __put_user_asm("stw", __x, ptr); break; \
case 8: STD_USER(__x, ptr); break; \
default: BUILD_BUG(); break; \
} \
} \

@@ -159,18 +159,18 @@ struct exception_data {
* r8/r9 are already listed as err/val.
*/

#define __put_kernel_asm(stx,x,ptr) \
#define __put_kernel_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
: "r1")

#define __put_user_asm(stx,x,ptr) \
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err) \
: "r1")

@@ -178,23 +178,23 @@ struct exception_data {

#if !defined(CONFIG_64BIT)

#define __put_kernel_asm64(__val,ptr) do { \
#define __put_kernel_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%1)" \
"\n2:\tstw %R2,4(%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \
} while (0)

#define __put_user_asm64(__val,ptr) do { \
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
"\n1:\tstw %2,0(%%sr3,%1)" \
"\n2:\tstw %R2,4(%%sr3,%1)\n\t" \
ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\
ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
: "=r"(__pu_err) \
: "r"(ptr), "r"(__val), "0"(__pu_err) \
: "r1"); \

@@ -211,8 +211,8 @@ extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *,unsigned long);
extern long lstrnlen_user(const char __user *,long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
* Complex access routines -- macros
*/
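Most of the per-architecture hunks in this series, the ones above included, share one shape: pick an access primitive by sizeof(*(ptr)), pull the value through a plain long, then cast it back to the pointer's type, and the added __force sits on that final cast. A stripped-down, architecture-neutral sketch of that shape (the demo_* names are invented, there is no access_ok or fault handling, and demo_force merely stands in for the kernel's __force annotation):

#ifdef __CHECKER__			/* sparse run */
# define demo_force	__attribute__((force))
#else
# define demo_force
#endif

/* Hypothetical stand-in for the per-size asm loaders; no fault handling. */
static inline long demo_load(const void *src, unsigned long *val, unsigned long size)
{
	switch (size) {
	case 1: *val = *(const unsigned char *)src; break;
	case 2: *val = *(const unsigned short *)src; break;
	case 4: *val = *(const unsigned int *)src; break;
	case 8: *val = *(const unsigned long long *)src; break;	/* 64-bit host assumed */
	default: return -1;
	}
	return 0;
}

#define demo_get_user(x, ptr) \
({ \
	unsigned long __gu_val = 0; \
	long __gu_err = demo_load((ptr), &__gu_val, sizeof(*(ptr))); \
	(x) = (demo_force __typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})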
@@ -23,7 +23,7 @@ typedef struct {
#define USER_DS KERNEL_DS
#endif

#define segment_eq(a,b) ((a).seg == (b).seg)
#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds() (KERNEL_DS)

@@ -60,7 +60,7 @@ struct __large_struct { unsigned long buf[100]; };
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})

@@ -71,7 +71,7 @@ struct __large_struct { unsigned long buf[100]; };
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})
@@ -59,19 +59,19 @@ do { \
switch (size) { \
case 1: \
retval = __put_user_asm_b((void *)&x, \
(long)ptr); \
(__force long)ptr); \
break; \
case 2: \
retval = __put_user_asm_w((void *)&x, \
(long)ptr); \
(__force long)ptr); \
break; \
case 4: \
retval = __put_user_asm_l((void *)&x, \
(long)ptr); \
(__force long)ptr); \
break; \
case 8: \
retval = __put_user_asm_q((void *)&x, \
(long)ptr); \
(__force long)ptr); \
break; \
default: \
__put_user_unknown(); \
@@ -37,7 +37,7 @@
#define get_fs() (current->thread.current_ds)
#define set_fs(val) ((current->thread.current_ds) = (val))

#define segment_eq(a,b) ((a).seg == (b).seg)
#define segment_eq(a, b) ((a).seg == (b).seg)

/* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
* can be fairly lightweight.

@@ -46,8 +46,8 @@
*/
#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
#define access_ok(type, addr, size) \
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(type, addr, size) \
({ (void)(type); __access_ok((unsigned long)(addr), size); })

/*

@@ -91,158 +91,221 @@ void __ret_efault(void);
* of a performance impact. Thus we have a few rather ugly macros here,
* and hide all the ugliness from the user.
*/
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
#define put_user(x, ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
#define get_user(x, ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})

/*
* The "__xxx" versions do not do address space checking, useful when
* doing multiple accesses to the same area (the user has to do the
* checks by hand with "access_ok()")
*/
#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))

#define __put_user_check(x,addr,size) ({ \
register int __pu_ret; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
case 4: __put_user_asm(x,,addr,__pu_ret); break; \
case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} } else { __pu_ret = -EFAULT; } __pu_ret; })
#define __put_user_check(x, addr, size) ({ \
register int __pu_ret; \
if (__access_ok(addr, size)) { \
switch (size) { \
case 1: \
__put_user_asm(x, b, addr, __pu_ret); \
break; \
case 2: \
__put_user_asm(x, h, addr, __pu_ret); \
break; \
case 4: \
__put_user_asm(x, , addr, __pu_ret); \
break; \
case 8: \
__put_user_asm(x, d, addr, __pu_ret); \
break; \
default: \
__pu_ret = __put_user_bad(); \
break; \
} \
} else { \
__pu_ret = -EFAULT; \
} \
__pu_ret; \
})

#define __put_user_nocheck(x,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
case 4: __put_user_asm(x,,addr,__pu_ret); break; \
case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })
#define __put_user_nocheck(x, addr, size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(x, b, addr, __pu_ret); break; \
case 2: __put_user_asm(x, h, addr, __pu_ret); break; \
case 4: __put_user_asm(x, , addr, __pu_ret); break; \
case 8: __put_user_asm(x, d, addr, __pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} \
__pu_ret; \
})

#define __put_user_asm(x,size,addr,ret) \
#define __put_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
"/* Put user asm, inline. */\n" \
"1:\t" "st"#size " %1, %2\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"b 2b\n\t" \
" mov %3, %0\n\t" \
".previous\n\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\t" \
".previous\n\n\t" \
: "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
"i" (-EFAULT))
"/* Put user asm, inline. */\n" \
"1:\t" "st"#size " %1, %2\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"b 2b\n\t" \
" mov %3, %0\n\t" \
".previous\n\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\t" \
".previous\n\n\t" \
: "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
"i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_check(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
#define __get_user_check(x, addr, size, type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
if (__access_ok(addr, size)) { \
switch (size) { \
case 1: \
__get_user_asm(__gu_val, ub, addr, __gu_ret); \
break; \
case 2: \
__get_user_asm(__gu_val, uh, addr, __gu_ret); \
break; \
case 4: \
__get_user_asm(__gu_val, , addr, __gu_ret); \
break; \
case 8: \
__get_user_asm(__gu_val, d, addr, __gu_ret); \
break; \
default: \
__gu_val = 0; \
__gu_ret = __get_user_bad(); \
break; \
} \
} else { \
__gu_val = 0; \
__gu_ret = -EFAULT; \
} \
x = (__force type) __gu_val; \
__gu_ret; \
})

#define __get_user_check_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
if (__access_ok(addr,size)) { \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; } else return retval; })
#define __get_user_check_ret(x, addr, size, type, retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
if (__access_ok(addr, size)) { \
switch (size) { \
case 1: \
__get_user_asm_ret(__gu_val, ub, addr, retval); \
break; \
case 2: \
__get_user_asm_ret(__gu_val, uh, addr, retval); \
break; \
case 4: \
__get_user_asm_ret(__gu_val, , addr, retval); \
break; \
case 8: \
__get_user_asm_ret(__gu_val, d, addr, retval); \
break; \
default: \
if (__get_user_bad()) \
return retval; \
} \
x = (__force type) __gu_val; \
} else \
return retval; \
})

#define __get_user_nocheck(x,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} x = (type) __gu_val; __gu_ret; })
#define __get_user_nocheck(x, addr, size, type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \
case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \
default: \
__gu_val = 0; \
__gu_ret = __get_user_bad(); \
break; \
} \
x = (__force type) __gu_val; \
__gu_ret; \
})

#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} x = (type) __gu_val; })
#define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
case 4: __get_user_asm_ret(__gu_val, , addr, retval); break; \
case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break; \
default: \
if (__get_user_bad()) \
return retval; \
} \
x = (__force type) __gu_val; \
})

#define __get_user_asm(x,size,addr,ret) \
#define __get_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
"/* Get user asm, inline. */\n" \
"1:\t" "ld"#size " %2, %1\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"clr %1\n\t" \
"b 2b\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
"i" (-EFAULT))
"/* Get user asm, inline. */\n" \
"1:\t" "ld"#size " %2, %1\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"clr %1\n\t" \
"b 2b\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
"i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval) \
#define __get_user_asm_ret(x, size, addr, retval) \
if (__builtin_constant_p(retval) && retval == -EFAULT) \
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size " %1, %0\n\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b,__ret_efault\n\n\t" \
".previous\n\t" \
: "=&r" (x) : "m" (*__m(addr))); \
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size " %1, %0\n\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b,__ret_efault\n\n\t" \
".previous\n\t" \
: "=&r" (x) : "m" (*__m(addr))); \
else \
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size " %1, %0\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"ret\n\t" \
" restore %%g0, %2, %%o0\n\n\t" \
".previous\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=&r" (x) : "m" (*__m(addr)), "i" (retval))
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size " %1, %0\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"ret\n\t" \
" restore %%g0, %2, %%o0\n\n\t" \
".previous\n\t" \
".section __ex_table,#alloc\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=&r" (x) : "m" (*__m(addr)), "i" (retval))

int __get_user_bad(void);
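One detail worth calling out in the hunks above: __get_user_asm_ret() branches on __builtin_constant_p(retval) so that, when the caller's error value is the compile-time constant -EFAULT, the exception table can point straight at the shared __ret_efault stub instead of emitting a per-call fixup. A plain-C sketch of that compile-time dispatch (the demo_* names are invented):

#include <errno.h>

static inline long demo_efault_stub(void)	{ return -EFAULT; }	/* shared path */
static inline long demo_inline_fixup(long err)	{ return err; }		/* generic path */

/* The selector folds away at compile time when retval is a constant. */
#define demo_fixup(retval) \
	((__builtin_constant_p(retval) && (retval) == -EFAULT) \
		? demo_efault_stub() \
		: demo_inline_fixup(retval))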
@@ -41,11 +41,11 @@
#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a,b) ((a).seg == (b).seg)
#define segment_eq(a, b) ((a).seg == (b).seg)

#define set_fs(val) \
do { \
current_thread_info()->current_ds =(val).seg; \
current_thread_info()->current_ds = (val).seg; \
__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)

@@ -88,121 +88,135 @@ void __retl_efault(void);
* of a performance impact. Thus we have a few rather ugly macros here,
* and hide all the ugliness from the user.
*/
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
#define put_user(x, ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
#define get_user(x, ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x,ptr) put_user(x,ptr)
#define __get_user(x,ptr) get_user(x,ptr)
#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })
#define __put_user_nocheck(data, addr, size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} \
__pu_ret; \
})

#define __put_user_asm(x,size,addr,ret) \
#define __put_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
"/* Put user asm, inline. */\n" \
"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"sethi %%hi(2b), %0\n\t" \
"jmpl %0 + %%lo(2b), %%g0\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\t" \
".previous\n\n\t" \
: "=r" (ret) : "r" (x), "r" (__m(addr)), \
"i" (-EFAULT))
"/* Put user asm, inline. */\n" \
"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"sethi %%hi(2b), %0\n\t" \
"jmpl %0 + %%lo(2b), %%g0\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\t" \
".previous\n\n\t" \
: "=r" (ret) : "r" (x), "r" (__m(addr)), \
"i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} data = (type) __gu_val; __gu_ret; })
#define __get_user_nocheck(data, addr, size, type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
default: \
__gu_val = 0; \
__gu_ret = __get_user_bad(); \
break; \
} \
data = (__force type) __gu_val; \
__gu_ret; \
})

#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} data = (type) __gu_val; })
#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \
case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \
default: \
if (__get_user_bad()) \
return retval; \
} \
data = (__force type) __gu_val; \
})

#define __get_user_asm(x,size,addr,ret) \
#define __get_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
"/* Get user asm, inline. */\n" \
"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"sethi %%hi(2b), %0\n\t" \
"clr %1\n\t" \
"jmpl %0 + %%lo(2b), %%g0\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
"i" (-EFAULT))
"/* Get user asm, inline. */\n" \
"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
"clr %0\n" \
"2:\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"sethi %%hi(2b), %0\n\t" \
"clr %1\n\t" \
"jmpl %0 + %%lo(2b), %%g0\n\t" \
" mov %3, %0\n\n\t" \
".previous\n\t" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
"i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval) \
#define __get_user_asm_ret(x, size, addr, retval) \
if (__builtin_constant_p(retval) && retval == -EFAULT) \
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b,__ret_efault\n\n\t" \
".previous\n\t" \
: "=r" (x) : "r" (__m(addr))); \
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b,__ret_efault\n\n\t" \
".previous\n\t" \
: "=r" (x) : "r" (__m(addr))); \
else \
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"ret\n\t" \
" restore %%g0, %2, %%o0\n\n\t" \
".previous\n\t" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=r" (x) : "r" (__m(addr)), "i" (retval))
__asm__ __volatile__( \
"/* Get user asm ret, inline. */\n" \
"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
".section .fixup,#alloc,#execinstr\n\t" \
".align 4\n" \
"3:\n\t" \
"ret\n\t" \
" restore %%g0, %2, %%o0\n\n\t" \
".previous\n\t" \
".section __ex_table,\"a\"\n\t" \
".align 4\n\t" \
".word 1b, 3b\n\n\t" \
".previous\n\t" \
: "=r" (x) : "r" (__m(addr)), "i" (retval))

int __get_user_bad(void);
@@ -179,7 +179,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
asm volatile("call __get_user_%P3" \
: "=a" (__ret_gu), "=r" (__val_gu) \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
(x) = (__typeof__(*(ptr))) __val_gu; \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__ret_gu; \
})
@@ -182,13 +182,13 @@
#define get_fs() (current->thread.current_ds)
#define set_fs(val) (current->thread.current_ds = (val))

#define segment_eq(a,b) ((a).seg == (b).seg)
#define segment_eq(a, b) ((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) \
#define __user_ok(addr, size) \
(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

/*
* These are the main single-value transfer routines. They

@@ -204,8 +204,8 @@
* (a) re-use the arguments for side effects (sizeof is ok)
* (b) require any knowledge of processes at this stage
*/
#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
* The "__xxx" versions of the user access functions are versions that

@@ -213,39 +213,39 @@
* with a separate "access_ok()" call (this is used when we do multiple
* accesses to the same area of user memory).
*/
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))


extern long __put_user_bad(void);

#define __put_user_nocheck(x,ptr,size) \
#define __put_user_nocheck(x, ptr, size) \
({ \
long __pu_err; \
__put_user_size((x),(ptr),(size),__pu_err); \
__put_user_size((x), (ptr), (size), __pu_err); \
__pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
#define __put_user_check(x, ptr, size) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
__put_user_size((x), __pu_addr, (size), __pu_err); \
__pu_err; \
})

#define __put_user_size(x,ptr,size,retval) \
#define __put_user_size(x, ptr, size, retval) \
do { \
int __cb; \
retval = 0; \
switch (size) { \
case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \
case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
case 8: { \
__typeof__(*ptr) __v64 = x; \
retval = __copy_to_user(ptr,&__v64,8); \
retval = __copy_to_user(ptr, &__v64, 8); \
break; \
} \
default: __put_user_bad(); \

@@ -316,35 +316,35 @@ __asm__ __volatile__( \
:"=r" (err), "=r" (cb) \
:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_nocheck(x,ptr,size) \
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err, __gu_val; \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})

#define __get_user_check(x,ptr,size) \
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT, __gu_val = 0; \
const __typeof__(*(ptr)) *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
if (access_ok(VERIFY_READ, __gu_addr, size)) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval) \
#define __get_user_size(x, ptr, size, retval) \
do { \
int __cb; \
retval = 0; \
switch (size) { \
case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
case 8: retval = __copy_from_user(&x,ptr,8); break; \
case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
case 8: retval = __copy_from_user(&x, ptr, 8); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)

@@ -390,19 +390,19 @@ __asm__ __volatile__( \
*/

extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)
#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)


static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
return __copy_user(to,from,n);
return __copy_user(to, from, n);
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
return __copy_user(to,from,n);
return __copy_user(to, from, n);
}

static inline unsigned long

@@ -410,7 +410,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
{
prefetch(from);
if (access_ok(VERIFY_WRITE, to, n))
return __copy_user(to,from,n);
return __copy_user(to, from, n);
return n;
}

@@ -419,18 +419,18 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
{
prefetchw(to);
if (access_ok(VERIFY_READ, from, n))
return __copy_user(to,from,n);
return __copy_user(to, from, n);
else
memset(to, 0, n);
return n;
}

#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
#define __copy_to_user(to,from,n) \
__generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) \
__generic_copy_from_user_nocheck((to),(from),(n))
#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
#define __copy_to_user(to, from, n) \
__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user(to, from, n) \
__generic_copy_from_user_nocheck((to), (from), (n))
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user