Merge tag 'usercopy-v4.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull hardened usercopy fixes from Kees Cook:

 - inline copy_*_user() for correct use of __builtin_constant_p() for
   hardened usercopy and the recent compile-time checks

 - switch hardened usercopy to only check non-const size arguments, to
   avoid meaningless checks on likely-sane const values

 - update the lkdtm usercopy tests to compensate for the const checking

* tag 'usercopy-v4.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  lkdtm: adjust usercopy tests to bypass const checks
  usercopy: fold builtin_const check into inline function
  x86/uaccess: force copy_*_user() to be inlined
commit 08411a7554
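Two ideas run through the whole series: the runtime hardening check is skipped when the copy length is a compile-time constant, and that can only be detected if copy_*_user() is actually inlined. Below is a minimal userland sketch of the gating idea (hypothetical names; the real kernel helper takes a to_user flag and walks the object's bounds), assuming gcc or clang at -O2:

#include <stdio.h>
#include <stddef.h>

/* stand-in for the kernel's runtime __check_object_size() walk */
static void runtime_object_check(const void *ptr, size_t n)
{
        printf("runtime check: %p, %zu bytes\n", ptr, n);
}

/* gate the runtime check on the size NOT being a compile-time constant */
static inline void check_object_size(const void *ptr, size_t n)
{
        if (!__builtin_constant_p(n))
                runtime_object_check(ptr, n);
}

int main(void)
{
        char buf[16];
        size_t n = (size_t)getchar();   /* a genuinely runtime size */

        check_object_size(buf, sizeof(buf)); /* const size: branch folds to if (0), call elided */
        check_object_size(buf, n);           /* non-const size: runtime check kept */
        return 0;
}

The const case costs nothing at runtime because the compiler proves the condition false after inlining; only sizes it cannot prove constant reach the runtime check.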
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
@@ -241,8 +241,7 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
-        if (!__builtin_constant_p(count))
-                check_object_size(from, count, true);
+        check_object_size(from, count, true);

         return __copy_user(to, (__force void __user *) from, count);
 }
@@ -250,8 +249,7 @@ __copy_to_user (void __user *to, const void *from, unsigned long count)
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
-        if (!__builtin_constant_p(count))
-                check_object_size(to, count, false);
+        check_object_size(to, count, false);

         return __copy_user((__force void __user *) to, from, count);
 }
@@ -265,8 +263,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
         long __cu_len = (n);                                            \
                                                                         \
         if (__access_ok(__cu_to, __cu_len, get_fs())) {                 \
-                if (!__builtin_constant_p(n))                           \
-                        check_object_size(__cu_from, __cu_len, true);   \
+                check_object_size(__cu_from, __cu_len, true);           \
                 __cu_len = __copy_user(__cu_to, (__force void __user *)  __cu_from, __cu_len); \
         }                                                               \
         __cu_len;                                                       \
@@ -280,8 +277,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
                                                                         \
         __chk_user_ptr(__cu_from);                                      \
         if (__access_ok(__cu_from, __cu_len, get_fs())) {               \
-                if (!__builtin_constant_p(n))                           \
-                        check_object_size(__cu_to, __cu_len, false);    \
+                check_object_size(__cu_to, __cu_len, false);            \
                 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
         }                                                               \
         __cu_len;                                                       \
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
@@ -311,14 +311,12 @@ static inline unsigned long copy_from_user(void *to,
         unsigned long over;

         if (access_ok(VERIFY_READ, from, n)) {
-                if (!__builtin_constant_p(n))
-                        check_object_size(to, n, false);
+                check_object_size(to, n, false);
                 return __copy_tofrom_user((__force void __user *)to, from, n);
         }
         if ((unsigned long)from < TASK_SIZE) {
                 over = (unsigned long)from + n - TASK_SIZE;
-                if (!__builtin_constant_p(n - over))
-                        check_object_size(to, n - over, false);
+                check_object_size(to, n - over, false);
                 return __copy_tofrom_user((__force void __user *)to, from,
                                 n - over) + over;
         }
@@ -331,14 +329,12 @@ static inline unsigned long copy_to_user(void __user *to,
         unsigned long over;

         if (access_ok(VERIFY_WRITE, to, n)) {
-                if (!__builtin_constant_p(n))
-                        check_object_size(from, n, true);
+                check_object_size(from, n, true);
                 return __copy_tofrom_user(to, (__force void __user *)from, n);
         }
         if ((unsigned long)to < TASK_SIZE) {
                 over = (unsigned long)to + n - TASK_SIZE;
-                if (!__builtin_constant_p(n))
-                        check_object_size(from, n - over, true);
+                check_object_size(from, n - over, true);
                 return __copy_tofrom_user(to, (__force void __user *)from,
                                 n - over) + over;
         }
@@ -383,8 +379,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
                         return 0;
         }

-        if (!__builtin_constant_p(n))
-                check_object_size(to, n, false);
+        check_object_size(to, n, false);

         return __copy_tofrom_user((__force void __user *)to, from, n);
 }
@@ -412,8 +407,8 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                 if (ret == 0)
                         return 0;
         }
-        if (!__builtin_constant_p(n))
-                check_object_size(from, n, true);
+
+        check_object_size(from, n, true);

         return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
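For reference, the powerpc "over" arithmetic clamps a copy that straddles the user/kernel boundary to its in-bounds portion. For example, with TASK_SIZE = 0x1000, from = 0xf80 and n = 0x100, access_ok() fails, over = 0xf80 + 0x100 - 0x1000 = 0x80, so only the 0x80 in-bounds bytes are checked and copied, and the clamped-off 0x80 bytes are added back into the return value as uncopied.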
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
@@ -249,8 +249,7 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
         if (n && __access_ok((unsigned long) to, n)) {
-                if (!__builtin_constant_p(n))
-                        check_object_size(from, n, true);
+                check_object_size(from, n, true);
                 return __copy_user(to, (__force void __user *) from, n);
         } else
                 return n;
@@ -258,16 +257,14 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi

 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-        if (!__builtin_constant_p(n))
-                check_object_size(from, n, true);
+        check_object_size(from, n, true);
         return __copy_user(to, (__force void __user *) from, n);
 }

 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
 {
         if (n && __access_ok((unsigned long) from, n)) {
-                if (!__builtin_constant_p(n))
-                        check_object_size(to, n, false);
+                check_object_size(to, n, false);
                 return __copy_user((__force void __user *) to, from, n);
         } else
                 return n;
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
@@ -212,8 +212,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
         unsigned long ret;

-        if (!__builtin_constant_p(size))
-                check_object_size(to, size, false);
+        check_object_size(to, size, false);

         ret = ___copy_from_user(to, from, size);
         if (unlikely(ret))
@@ -233,8 +232,8 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
         unsigned long ret;

-        if (!__builtin_constant_p(size))
-                check_object_size(from, size, true);
+        check_object_size(from, size, true);
+
         ret = ___copy_to_user(to, from, size);
         if (unlikely(ret))
                 ret = copy_to_user_fixup(to, from, size);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
@@ -705,7 +705,7 @@ static inline void copy_user_overflow(int size, unsigned long count)
         WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }

-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
         int sz = __compiletime_object_size(to);
@@ -725,7 +725,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
         return n;
 }

-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long n)
 {
         int sz = __compiletime_object_size(from);
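The switch to __always_inline matters because __builtin_constant_p() on a function parameter can only report true after the call has actually been inlined; a merely "inline" function that the compiler emits out of line sees every size as non-constant. A standalone illustration (assumed gcc or clang; with -O2 the second line prints 1, and without optimization both print 0, per GCC's documented __builtin_constant_p behavior):

#include <stdio.h>

__attribute__((noinline))
static int n_is_const_outofline(unsigned long n)
{
        /* n is an ordinary parameter here, so this is always 0 */
        return __builtin_constant_p(n);
}

static inline __attribute__((always_inline))
int n_is_const_inline(unsigned long n)
{
        /* after inlining, a literal argument is visible as a constant */
        return __builtin_constant_p(n);
}

int main(void)
{
        printf("out-of-line: %d\n", n_is_const_outofline(16));  /* 0 */
        printf("inlined:     %d\n", n_is_const_inline(16));     /* 1 at -O2 */
        return 0;
}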
@ -9,7 +9,15 @@
|
|||
#include <linux/uaccess.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
static size_t cache_size = 1024;
|
||||
/*
|
||||
* Many of the tests here end up using const sizes, but those would
|
||||
* normally be ignored by hardened usercopy, so force the compiler
|
||||
* into choosing the non-const path to make sure we trigger the
|
||||
* hardened usercopy checks by added "unconst" to all the const copies,
|
||||
* and making sure "cache_size" isn't optimized into a const.
|
||||
*/
|
||||
static volatile size_t unconst = 0;
|
||||
static volatile size_t cache_size = 1024;
|
||||
static struct kmem_cache *bad_cache;
|
||||
|
||||
static const unsigned char test_text[] = "This is a test.\n";
|
||||
|
@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
|
|||
if (to_user) {
|
||||
pr_info("attempting good copy_to_user of local stack\n");
|
||||
if (copy_to_user((void __user *)user_addr, good_stack,
|
||||
sizeof(good_stack))) {
|
||||
unconst + sizeof(good_stack))) {
|
||||
pr_warn("copy_to_user failed unexpectedly?!\n");
|
||||
goto free_user;
|
||||
}
|
||||
|
||||
pr_info("attempting bad copy_to_user of distant stack\n");
|
||||
if (copy_to_user((void __user *)user_addr, bad_stack,
|
||||
sizeof(good_stack))) {
|
||||
unconst + sizeof(good_stack))) {
|
||||
pr_warn("copy_to_user failed, but lacked Oops\n");
|
||||
goto free_user;
|
||||
}
|
||||
|
@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
|
|||
|
||||
pr_info("attempting good copy_from_user of local stack\n");
|
||||
if (copy_from_user(good_stack, (void __user *)user_addr,
|
||||
sizeof(good_stack))) {
|
||||
unconst + sizeof(good_stack))) {
|
||||
pr_warn("copy_from_user failed unexpectedly?!\n");
|
||||
goto free_user;
|
||||
}
|
||||
|
||||
pr_info("attempting bad copy_from_user of distant stack\n");
|
||||
if (copy_from_user(bad_stack, (void __user *)user_addr,
|
||||
sizeof(good_stack))) {
|
||||
unconst + sizeof(good_stack))) {
|
||||
pr_warn("copy_from_user failed, but lacked Oops\n");
|
||||
goto free_user;
|
||||
}
|
||||
|
@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user)
|
|||
{
|
||||
unsigned long user_addr;
|
||||
unsigned char *one, *two;
|
||||
const size_t size = 1024;
|
||||
size_t size = unconst + 1024;
|
||||
|
||||
one = kmalloc(size, GFP_KERNEL);
|
||||
two = kmalloc(size, GFP_KERNEL);
|
||||
|
@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void)
|
|||
|
||||
pr_info("attempting good copy_to_user from kernel rodata\n");
|
||||
if (copy_to_user((void __user *)user_addr, test_text,
|
||||
sizeof(test_text))) {
|
||||
unconst + sizeof(test_text))) {
|
||||
pr_warn("copy_to_user failed unexpectedly?!\n");
|
||||
goto free_user;
|
||||
}
|
||||
|
||||
pr_info("attempting bad copy_to_user from kernel text\n");
|
||||
if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
|
||||
if (copy_to_user((void __user *)user_addr, vm_mmap,
|
||||
unconst + PAGE_SIZE)) {
|
||||
pr_warn("copy_to_user failed, but lacked Oops\n");
|
||||
goto free_user;
|
||||
}
|
||||
|
|
|
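The "unconst" trick is plain C and works outside the kernel too: adding a volatile zero hides the constant from the optimizer, so a size that is morally sizeof(buf) takes the non-const checking path. A small userland sketch (gcc or clang, -O2 assumed):

#include <stdio.h>

/* a volatile zero the optimizer cannot constant-fold away */
static volatile size_t unconst = 0;

int main(void)
{
        char buf[32];
        /* unconst must be re-read at runtime, so n is never a
         * compile-time constant even though it always equals 32 */
        size_t n = unconst + sizeof(buf);

        printf("constant? %d (n = %zu, buf at %p)\n",
               __builtin_constant_p(n), n, (void *)buf);  /* prints 0 */
        return 0;
}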
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
@@ -121,7 +121,8 @@ extern void __check_object_size(const void *ptr, unsigned long n,
 static inline void check_object_size(const void *ptr, unsigned long n,
                                      bool to_user)
 {
-        __check_object_size(ptr, n, to_user);
+        if (!__builtin_constant_p(n))
+                __check_object_size(ptr, n, to_user);
 }
 #else
 static inline void check_object_size(const void *ptr, unsigned long n,
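Folding the guard into check_object_size() is what lets all the per-architecture call sites above drop their own __builtin_constant_p() tests: every caller now makes one unconditional call, and for compile-time-constant sizes the branch folds to if (0) so the compiler deletes the call to __check_object_size() entirely, while non-const sizes still reach the runtime walk.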