From d02f6b7dab8228487268298ea1f21081c0b4b3eb Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Tue, 7 Apr 2020 14:12:45 +1000
Subject: [PATCH 1/8] powerpc/uaccess: Evaluate macro arguments once, before user access is allowed

get/put_user() can be called with nontrivial arguments. fs/proc/page.c
has a good example:

    if (put_user(stable_page_flags(ppage), out)) {

stable_page_flags() is quite a lot of code, including spin locks in the
page allocator.

Ensure these arguments are evaluated before user access is allowed.

This improves security by reducing the amount of code that runs with
access to userspace, but it also fixes a PREEMPT bug with KUAP on
powerpc/64s: stable_page_flags() is currently called with AMR set to
allow writes, and it ends up calling spin_unlock(), which can call
preempt_schedule. But the task switch code cannot be called with AMR
set (it relies on interrupts saving the register), so this blows up.

It's fine if the code inside allow_user_access() is preemptible,
because a timer or IPI will save the AMR, but it's not okay to
explicitly cause a reschedule.
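To make the ordering change concrete, here is a simplified before/after
sketch of the expansion (the helper names are the real powerpc ones, but
this is illustrative, not the literal macro output):

	/* before: the value expression ran with user access open */
	allow_write_to_user(out, sizeof(*out));
	*out = stable_page_flags(ppage);	/* may lock, unlock, reschedule */
	prevent_write_to_user(out, sizeof(*out));

	/* after: evaluate the argument first, then open user access */
	__typeof__(*out) __pu_val = stable_page_flags(ppage);
	allow_write_to_user(out, sizeof(*out));
	*out = __pu_val;
	prevent_write_to_user(out, sizeof(*out));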
Fixes: de78a9c42a79 ("powerpc: Add a framework for Kernel Userspace Access Protection")
Signed-off-by: Nicholas Piggin
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200407041245.600651-1-npiggin@gmail.com
---
 arch/powerpc/include/asm/uaccess.h | 49 +++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 2f500debae21..0969285996cb 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -166,13 +166,17 @@ do {								\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
 	if (!is_kernel_addr((unsigned long)__pu_addr))		\
 		might_fault();					\
-	__chk_user_ptr(ptr);					\
+	__chk_user_ptr(__pu_addr);				\
 	if (do_allow)						\
-		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
 	else							\
-		__put_user_size_allowed((x), __pu_addr, (size), __pu_err);	\
+		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })

@@ -180,9 +184,13 @@ do {								\
 ({								\
 	long __pu_err = -EFAULT;				\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
 	might_fault();						\
-	if (access_ok(__pu_addr, size))				\
-		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	if (access_ok(__pu_addr, __pu_size))			\
+		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })

@@ -190,8 +198,12 @@ do {								\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
-	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__typeof__(*(ptr)) __pu_val = (x);			\
+	__typeof__(size) __pu_size = (size);			\
+								\
+	__chk_user_ptr(__pu_addr);				\
+	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+								\
 	__pu_err;						\
 })

@@ -283,15 +295,18 @@ do {								\
 	long __gu_err;						\
 	__long_type(*(ptr)) __gu_val;				\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
+	__typeof__(size) __gu_size = (size);			\
+								\
+	__chk_user_ptr(__gu_addr);				\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
 	barrier_nospec();					\
 	if (do_allow)						\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	else							\
-		__get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;			\
+								\
 	__gu_err;						\
 })

@@ -300,12 +315,15 @@ do {								\
 	long __gu_err = -EFAULT;				\
 	__long_type(*(ptr)) __gu_val = 0;			\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
+	__typeof__(size) __gu_size = (size);			\
+								\
 	might_fault();						\
-	if (access_ok(__gu_addr, (size))) {			\
+	if (access_ok(__gu_addr, __gu_size)) {			\
 		barrier_nospec();				\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	}							\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+								\
 	__gu_err;						\
 })

@@ -314,10 +332,13 @@ do {								\
 	long __gu_err;						\
 	__long_type(*(ptr)) __gu_val;				\
 	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	__chk_user_ptr(ptr);					\
+	__typeof__(size) __gu_size = (size);			\
+								\
+	__chk_user_ptr(__gu_addr);				\
 	barrier_nospec();					\
-	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+								\
 	__gu_err;						\
 })

From 334710b1496af8a0960e70121f850e209c20958f Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 17 Apr 2020 17:08:51 +0000
Subject: [PATCH 2/8] powerpc/uaccess: Implement unsafe_put_user() using 'asm goto'

unsafe_put_user() is designed to take advantage of 'asm goto'.

Instead of using the standard __put_user() approach and branching on
the returned error, use 'asm goto' and make the exception code branch
directly to the error label. There is no longer any code in the fixup
section.

This change significantly simplifies functions using unsafe_put_user().

A small example of the benefit, with the following code:

struct test {
	u32 item1;
	u16 item2;
	u8 item3;
	u64 item4;
};

int set_test_to_user(struct test __user *test, u32 item1, u16 item2, u8 item3, u64 item4)
{
	unsafe_put_user(item1, &test->item1, failed);
	unsafe_put_user(item2, &test->item2, failed);
	unsafe_put_user(item3, &test->item3, failed);
	unsafe_put_user(item4, &test->item4, failed);
	return 0;
failed:
	return -EFAULT;
}

Before the patch:

00000be8 <set_test_to_user>:
 be8:	39 20 00 00 	li      r9,0
 bec:	90 83 00 00 	stw     r4,0(r3)
 bf0:	2f 89 00 00 	cmpwi   cr7,r9,0
 bf4:	40 9e 00 38 	bne     cr7,c2c
 bf8:	b0 a3 00 04 	sth     r5,4(r3)
 bfc:	2f 89 00 00 	cmpwi   cr7,r9,0
 c00:	40 9e 00 2c 	bne     cr7,c2c
 c04:	98 c3 00 06 	stb     r6,6(r3)
 c08:	2f 89 00 00 	cmpwi   cr7,r9,0
 c0c:	40 9e 00 20 	bne     cr7,c2c
 c10:	90 e3 00 08 	stw     r7,8(r3)
 c14:	91 03 00 0c 	stw     r8,12(r3)
 c18:	21 29 00 00 	subfic  r9,r9,0
 c1c:	7d 29 49 10 	subfe   r9,r9,r9
 c20:	38 60 ff f2 	li      r3,-14
 c24:	7d 23 18 38 	and     r3,r9,r3
 c28:	4e 80 00 20 	blr
 c2c:	38 60 ff f2 	li      r3,-14
 c30:	4e 80 00 20 	blr

00000000 <.fixup>:
	...
  b8:	39 20 ff f2 	li      r9,-14
  bc:	48 00 00 00 	b       bc <.fixup+0xbc>
			bc: R_PPC_REL24	.text+0xbf0
  c0:	39 20 ff f2 	li      r9,-14
  c4:	48 00 00 00 	b       c4 <.fixup+0xc4>
			c4: R_PPC_REL24	.text+0xbfc
  c8:	39 20 ff f2 	li      r9,-14
  cc:	48 00 00 00 	b       cc <.fixup+0xcc>
  d0:	39 20 ff f2 	li      r9,-14
  d4:	48 00 00 00 	b       d4 <.fixup+0xd4>
			d4: R_PPC_REL24	.text+0xc18

00000000 <__ex_table>:
	...
  a0:	R_PPC_REL32	.text+0xbec
  a4:	R_PPC_REL32	.fixup+0xb8
  a8:	R_PPC_REL32	.text+0xbf8
  ac:	R_PPC_REL32	.fixup+0xc0
  b0:	R_PPC_REL32	.text+0xc04
  b4:	R_PPC_REL32	.fixup+0xc8
  b8:	R_PPC_REL32	.text+0xc10
  bc:	R_PPC_REL32	.fixup+0xd0
  c0:	R_PPC_REL32	.text+0xc14
  c4:	R_PPC_REL32	.fixup+0xd0

After the patch:

00000be8 <set_test_to_user>:
 be8:	90 83 00 00 	stw     r4,0(r3)
 bec:	b0 a3 00 04 	sth     r5,4(r3)
 bf0:	98 c3 00 06 	stb     r6,6(r3)
 bf4:	90 e3 00 08 	stw     r7,8(r3)
 bf8:	91 03 00 0c 	stw     r8,12(r3)
 bfc:	38 60 00 00 	li      r3,0
 c00:	4e 80 00 20 	blr
 c04:	38 60 ff f2 	li      r3,-14
 c08:	4e 80 00 20 	blr

00000000 <__ex_table>:
	...
  a0:	R_PPC_REL32	.text+0xbe8
  a4:	R_PPC_REL32	.text+0xc04
  a8:	R_PPC_REL32	.text+0xbec
  ac:	R_PPC_REL32	.text+0xc04
  b0:	R_PPC_REL32	.text+0xbf0
  b4:	R_PPC_REL32	.text+0xc04
  b8:	R_PPC_REL32	.text+0xbf4
  bc:	R_PPC_REL32	.text+0xc04
  c0:	R_PPC_REL32	.text+0xbf8
  c4:	R_PPC_REL32	.text+0xc04
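For reference, the intended call pattern brackets these stores with a
user access window, a faulting store branching straight to the label.
This is an illustrative sketch (modelled on the waitid()-style callers
converted later in this series), not part of the patch:

	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_put_user(val, uptr, Efault);	/* a fault jumps to Efault */
	user_access_end();
	return 0;
Efault:
	user_access_end();
	return -EFAULT;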
Signed-off-by: Christophe Leroy
Reviewed-by: Segher Boessenkool
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/23e680624680a9a5405f4b88740d2596d4b17c26.1587143308.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/uaccess.h | 61 +++++++++++++++++++++++++-----
 1 file changed, 52 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 0969285996cb..3f30a1dbc198 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -93,12 +93,12 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
 #define __get_user(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
 #define __put_user(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __put_user_goto(x, ptr, label) \
+	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

 #define __get_user_allowed(x, ptr) \
 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
-#define __put_user_allowed(x, ptr) \
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

 #define __get_user_inatomic(x, ptr) \
 	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
@@ -162,7 +162,7 @@ do {								\
 	prevent_write_to_user(ptr, size);			\
 } while (0)

-#define __put_user_nocheck(x, ptr, size, do_allow)		\
+#define __put_user_nocheck(x, ptr, size)			\
 ({								\
 	long __pu_err;						\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
@@ -172,10 +172,7 @@ do {								\
 	if (!is_kernel_addr((unsigned long)__pu_addr))		\
 		might_fault();					\
 	__chk_user_ptr(__pu_addr);				\
-	if (do_allow)						\
-		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
-	else							\
-		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err);	\
+	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
 								\
 	__pu_err;						\
 })
@@ -208,6 +205,52 @@ do {								\
 })

+#define __put_user_asm_goto(x, addr, label, op)			\
+	asm volatile goto(					\
+		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
+		EX_TABLE(1b, %l2)				\
+		:						\
+		: "r" (x), "m<>" (*addr)			\
+		:						\
+		: label)
+
+#ifdef __powerpc64__
+#define __put_user_asm2_goto(x, ptr, label)			\
+	__put_user_asm_goto(x, ptr, label, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2_goto(x, addr, label)			\
+	asm volatile goto(					\
+		"1:	stw%X1 %0, %1\n"			\
+		"2:	stw%X1 %L0, %L1\n"			\
+		EX_TABLE(1b, %l2)				\
+		EX_TABLE(2b, %l2)				\
+		:						\
+		: "r" (x), "m" (*addr)				\
+		:						\
+		: label)
+#endif /* __powerpc64__ */
+
+#define __put_user_size_goto(x, ptr, size, label)		\
+do {								\
+	switch (size) {						\
+	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
+	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
+	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
+	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
+	default: __put_user_bad();				\
+	}							\
+} while (0)
+
+#define __put_user_nocheck_goto(x, ptr, size, label)		\
+do {								\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	if (!is_kernel_addr((unsigned long)__pu_addr))		\
+		might_fault();					\
+	__chk_user_ptr(ptr);					\
+	__put_user_size_goto((x), __pu_addr, (size), label);	\
+} while (0)
+
+
 extern long __get_user_bad(void);

 /*
@@ -491,7 +534,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)

 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
 #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
-#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
 #define unsafe_copy_to_user(d, s, l, e) \
 	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)

From 17bc43367fc2a720400d21c745db641c654c1e6b Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 17 Apr 2020 17:08:52 +0000
Subject: [PATCH 3/8] powerpc/uaccess: Implement unsafe_copy_to_user() as a simple loop

At the time being, unsafe_copy_to_user() is based on raw_copy_to_user()
which calls __copy_tofrom_user(). __copy_tofrom_user() is a big
optimised function for copying large amounts of data; it aligns the
destination to cache-line boundaries in order to use the dcbz
instruction.

Today unsafe_copy_to_user() is only called from filldir(). It is mainly
used to copy small amounts of data, like filenames, so
__copy_tofrom_user() is a poor fit.

Also, unsafe_copy_to_user() is used within user_access_begin/end
sections. In those sections, it is preferable not to call functions.

Rewrite unsafe_copy_to_user() as a macro that uses __put_user_goto():
first copy as many longs as possible, then finish with the smaller
accesses needed for the remainder.

unsafe_copy_to_user() might be used in the near future to copy
fixed-size data, like pt_regs structs during signal processing. Having
it as a macro allows GCC to optimise it when it knows the size in
advance: it can unroll the loop, and drop the tail copies when the size
is a multiple of the long size, etc.
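As an illustration of the fixed-size case (a hypothetical caller, not
part of this patch), the macro can collapse into straight-line stores:

	struct ctx {			/* hypothetical example type */
		unsigned long gpr[4];
	};

	int save_ctx(struct ctx __user *dst, const struct ctx *src)
	{
		if (!user_access_begin(dst, sizeof(*dst)))
			return -EFAULT;
		/* sizeof(*dst) is a compile-time constant and a multiple
		 * of sizeof(long), so GCC can unroll the long-copy loop
		 * into a short sequence of __put_user_goto() stores and
		 * drop the u32/u16/u8 tail copies entirely. */
		unsafe_copy_to_user(dst, src, sizeof(*dst), failed);
		user_access_end();
		return 0;
	failed:
		user_access_end();
		return -EFAULT;
	}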
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/fe952112c29bf6a0a2778c9e6bbb4f4afd2c4258.1587143308.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/uaccess.h | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 3f30a1dbc198..42b6c44e36a7 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -535,7 +535,26 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
 #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
 #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
+
 #define unsafe_copy_to_user(d, s, l, e) \
-	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+do {									\
+	u8 __user *_dst = (u8 __user *)(d);				\
+	const u8 *_src = (const u8 *)(s);				\
+	size_t _len = (l);						\
+	int _i;								\
+									\
+	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))	\
+		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);	\
+	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {			\
+		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);	\
+		_i += 4;						\
+	}								\
+	if (_len & 2) {							\
+		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);	\
+		_i += 2;						\
+	}								\
+	if (_len & 1)							\
+		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);	\
+} while (0)

 #endif	/* _ARCH_POWERPC_UACCESS_H */

From 999a22890cb183b918e4372395d24426a755cef2 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 3 Apr 2020 07:20:50 +0000
Subject: [PATCH 4/8] uaccess: Add user_read_access_begin/end and user_write_access_begin/end

Some architectures, like powerpc64, have the capability to separate
read access and write access protection.
For get_user() and copy_from_user(), powerpc64 only opens read access.
For put_user() and copy_to_user(), powerpc64 only opens write access.
But when using unsafe_get_user() or unsafe_put_user(),
user_access_begin opens both read and write.

Other architectures, like powerpc book3s 32 bits, only have write
access protection. And on those architectures protection is a heavy
operation, as it requires locking/unlocking per segment of 256 Mbytes.
On those architectures it is therefore desirable to do the unlocking
only for write access. (Note that book3s/32 ranges from very old
PowerMacs from the 90's with the PowerPC 601 processor to modern ADSL
boxes with PowerQuicc II processors, for instance, so it is still worth
considering.)

In order to avoid any risk that variable parameters passed to
user_access_begin/end could be manipulated to leave user access open,
or to open more of it than needed, it is preferable to use dedicated
static functions that can't be overridden.

Add user_read_access_begin and user_read_access_end to only open read
access.

Add user_write_access_begin and user_write_access_end to only open
write access.

By default, when undefined, those new access helpers default to the
existing user_access_begin and user_access_end.
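A minimal sketch of the pattern this enables in generic code
(illustrative only; on architectures that do not define the new
helpers, the fallbacks added below make it behave exactly like
user_access_begin/end):

	unsigned long val;

	if (!user_read_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_get_user(val, uptr, Efault);
	user_read_access_end();
	return val == 0;
Efault:
	user_read_access_end();
	return -EFAULT;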
Signed-off-by: Christophe Leroy
Reviewed-by: Kees Cook
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/36e43241c7f043a24b5069e78c6a7edd11043be5.1585898438.git.christophe.leroy@c-s.fr
---
 include/linux/uaccess.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 67f016010aad..9861c89f93be 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -378,6 +378,14 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long flags) { }
 #endif
+#ifndef user_write_access_begin
+#define user_write_access_begin user_access_begin
+#define user_write_access_end user_access_end
+#endif
+#ifndef user_read_access_begin
+#define user_read_access_begin user_access_begin
+#define user_read_access_end user_access_end
+#endif

 #ifdef CONFIG_HARDENED_USERCOPY
 void usercopy_warn(const char *name, const char *detail, bool to_user,

From 41cd780524674082b037e7c8461f90c5e42103f0 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 3 Apr 2020 07:20:51 +0000
Subject: [PATCH 5/8] uaccess: Selectively open read or write user access

When opening user access to only perform reads, only open read access.
When opening user access to only perform writes, only open write
access.

Signed-off-by: Christophe Leroy
Reviewed-by: Kees Cook
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/2e73bc57125c2c6ab12a587586a4eed3a47105fc.1585898438.git.christophe.leroy@c-s.fr
---
 fs/readdir.c            | 12 ++++++------
 kernel/compat.c         | 12 ++++++------
 kernel/exit.c           | 12 ++++++------
 lib/strncpy_from_user.c |  4 ++--
 lib/strnlen_user.c      |  4 ++--
 lib/usercopy.c          |  6 +++---
 6 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/fs/readdir.c b/fs/readdir.c
index de2eceffdee8..ed6aaad451aa 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -242,7 +242,7 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
 		return -EINTR;
 	dirent = buf->current_dir;
 	prev = (void __user *) dirent - prev_reclen;
-	if (!user_access_begin(prev, reclen + prev_reclen))
+	if (!user_write_access_begin(prev, reclen + prev_reclen))
 		goto efault;

 	/* This might be 'dirent->d_off', but if so it will get overwritten */
@@ -251,14 +251,14 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
 	unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
 	unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
 	unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
-	user_access_end();
+	user_write_access_end();

 	buf->current_dir = (void __user *)dirent + reclen;
 	buf->prev_reclen = reclen;
 	buf->count -= reclen;
 	return 0;
 efault_end:
-	user_access_end();
+	user_write_access_end();
 efault:
 	buf->error = -EFAULT;
 	return -EFAULT;
@@ -327,7 +327,7 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
 		return -EINTR;
 	dirent = buf->current_dir;
 	prev = (void __user *)dirent - prev_reclen;
-	if (!user_access_begin(prev, reclen + prev_reclen))
+	if (!user_write_access_begin(prev, reclen + prev_reclen))
 		goto efault;

 	/* This might be 'dirent->d_off', but if so it will get overwritten */
@@ -336,7 +336,7 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
 	unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
 	unsafe_put_user(d_type, &dirent->d_type, efault_end);
 	unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
-	user_access_end();
+	user_write_access_end();

 	buf->prev_reclen = reclen;
 	buf->current_dir = (void __user *)dirent + reclen;
@@ -344,7 +344,7 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
 	return 0;

 efault_end:
-	user_access_end();
+	user_write_access_end();
 efault:
 	buf->error = -EFAULT;
 	return -EFAULT;

diff --git a/kernel/compat.c b/kernel/compat.c
index 843dd17e6078..b8d2800bb4b7 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -199,7 +199,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
 	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

-	if (!user_access_begin(umask, bitmap_size / 8))
+	if (!user_read_access_begin(umask, bitmap_size / 8))
 		return -EFAULT;

 	while (nr_compat_longs > 1) {
@@ -211,11 +211,11 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 	}
 	if (nr_compat_longs)
 		unsafe_get_user(*mask, umask++, Efault);
-	user_access_end();
+	user_read_access_end();
 	return 0;

 Efault:
-	user_access_end();
+	user_read_access_end();
 	return -EFAULT;
 }

@@ -228,7 +228,7 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
 	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);

-	if (!user_access_begin(umask, bitmap_size / 8))
+	if (!user_write_access_begin(umask, bitmap_size / 8))
 		return -EFAULT;

 	while (nr_compat_longs > 1) {
@@ -239,10 +239,10 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
 	}
 	if (nr_compat_longs)
 		unsafe_put_user((compat_ulong_t)*mask, umask++, Efault);
-	user_access_end();
+	user_write_access_end();
 	return 0;
 Efault:
-	user_access_end();
+	user_write_access_end();
 	return -EFAULT;
 }

diff --git a/kernel/exit.c b/kernel/exit.c
index 389a88cb3081..2d97cbba512d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1557,7 +1557,7 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
 	if (!infop)
 		return err;

-	if (!user_access_begin(infop, sizeof(*infop)))
+	if (!user_write_access_begin(infop, sizeof(*infop)))
 		return -EFAULT;

 	unsafe_put_user(signo, &infop->si_signo, Efault);
@@ -1566,10 +1566,10 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
 	unsafe_put_user(info.status, &infop->si_status, Efault);
-	user_access_end();
+	user_write_access_end();
 	return err;
 Efault:
-	user_access_end();
+	user_write_access_end();
 	return -EFAULT;
 }

@@ -1684,7 +1684,7 @@ COMPAT_SYSCALL_DEFINE5(waitid,
 	if (!infop)
 		return err;

-	if (!user_access_begin(infop, sizeof(*infop)))
+	if (!user_write_access_begin(infop, sizeof(*infop)))
 		return -EFAULT;

 	unsafe_put_user(signo, &infop->si_signo, Efault);
@@ -1693,10 +1693,10 @@ COMPAT_SYSCALL_DEFINE5(waitid,
 	unsafe_put_user(info.pid, &infop->si_pid, Efault);
 	unsafe_put_user(info.uid, &infop->si_uid, Efault);
 	unsafe_put_user(info.status, &infop->si_status, Efault);
-	user_access_end();
+	user_write_access_end();
 	return err;
 Efault:
-	user_access_end();
+	user_write_access_end();
 	return -EFAULT;
 }
 #endif

diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 706020b06617..b90ec550183a 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -116,9 +116,9 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 		kasan_check_write(dst, count);
 		check_object_size(dst, count, false);

-		if (user_access_begin(src, max)) {
+		if (user_read_access_begin(src, max)) {
 			retval = do_strncpy_from_user(dst, src, count, max);
-			user_access_end();
+			user_read_access_end();
 			return retval;
 		}
 	}

diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 41670d4a5816..1616710b8a82 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -109,9 +109,9 @@ long strnlen_user(const char __user *str, long count)
 		if (max > count)
 			max = count;

-		if (user_access_begin(str, max)) {
+		if (user_read_access_begin(str, max)) {
 			retval = do_strnlen_user(str, count, max);
-			user_access_end();
+			user_read_access_end();
 			return retval;
 		}
 	}

diff --git a/lib/usercopy.c b/lib/usercopy.c
index cbb4d9ec00f2..ca2a697a2061 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -58,7 +58,7 @@ int check_zeroed_user(const void __user *from, size_t size)
 	from -= align;
 	size += align;

-	if (!user_access_begin(from, size))
+	if (!user_read_access_begin(from, size))
 		return -EFAULT;

 	unsafe_get_user(val, (unsigned long __user *) from, err_fault);
@@ -79,10 +79,10 @@ int check_zeroed_user(const void __user *from, size_t size)
 	val &= aligned_byte_mask(size);

 done:
-	user_access_end();
+	user_read_access_end();
 	return (val == 0);

 err_fault:
-	user_access_end();
+	user_read_access_end();
 	return -EFAULT;
 }
 EXPORT_SYMBOL(check_zeroed_user);

From b44f687386875b714dae2afa768e73401e45c21c Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 3 Apr 2020 07:20:52 +0000
Subject: [PATCH 6/8] drm/i915/gem: Replace user_access_begin by user_write_access_begin

When i915_gem_execbuffer2_ioctl() uses user_access_begin(), it is only
to perform unsafe_put_user() calls, so use user_write_access_begin() in
order to only open write access.

Signed-off-by: Christophe Leroy
Reviewed-by: Kees Cook
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/ebf1250b6d4f351469fb339e5399d8b92aa8a1c1.1585898438.git.christophe.leroy@c-s.fr
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b7440f06c5e2..8a4e9c1cbf6c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2794,7 +2794,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 		 * And this range already got effectively checked earlier
 		 * when we did the "copy_from_user()" above.
 		 */
-		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
+		if (!user_write_access_begin(user_exec_list,
+					     count * sizeof(*user_exec_list)))
 			goto end;

 		for (i = 0; i < args->buffer_count; i++) {
@@ -2808,7 +2809,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 					    end_user);
 		}
 end_user:
-		user_access_end();
+		user_write_access_end();
 end:;
 	}

From 4fe5cda9f89d0aea8e915b7c96ae34bda4e12e51 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Fri, 3 Apr 2020 07:20:53 +0000
Subject: [PATCH 7/8] powerpc/uaccess: Implement user_read_access_begin and user_write_access_begin

Add support for selective read or write user access with
user_read_access_begin/end and user_write_access_begin/end.
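Summarised as an illustrative sketch (the comments describe what the
helpers added below expand to; this is not code from the patch):

	user_read_access_begin(p, l);	/* access_ok() + allow_read_from_user() */
	...				/* window: only reads from userspace */
	user_read_access_end();		/* prevent_current_read_from_user() */

	user_write_access_begin(p, l);	/* access_ok() + allow_write_to_user() */
	...				/* window: only writes to userspace */
	user_write_access_end();	/* prevent_current_write_to_user() */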
Signed-off-by: Christophe Leroy
Reviewed-by: Kees Cook
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/6c83af0f0809ef2a955c39ac622767f6cbede035.1585898438.git.christophe.leroy@c-s.fr
---
 arch/powerpc/include/asm/book3s/32/kup.h |  4 ++--
 arch/powerpc/include/asm/kup.h           | 14 +++++++++++++-
 arch/powerpc/include/asm/uaccess.h       | 22 ++++++++++++++++++++++
 3 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 3c0ba22dc360..1617e73bee30 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -108,7 +108,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
 	u32 addr, end;

 	BUILD_BUG_ON(!__builtin_constant_p(dir));
-	BUILD_BUG_ON(dir == KUAP_CURRENT);
+	BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);

 	if (!(dir & KUAP_WRITE))
 		return;
@@ -131,7 +131,7 @@ static __always_inline void prevent_user_access(void __user *to, const void __user

 	BUILD_BUG_ON(!__builtin_constant_p(dir));

-	if (dir == KUAP_CURRENT) {
+	if (dir & KUAP_CURRENT_WRITE) {
 		u32 kuap = current->thread.kuap;

 		if (unlikely(!kuap))

diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 92bcd1a26d73..c745ee41ad66 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -10,7 +10,9 @@
  * Use the current saved situation instead of the to/from/size params.
  * Used on book3s/32
  */
-#define KUAP_CURRENT	4
+#define KUAP_CURRENT_READ	4
+#define KUAP_CURRENT_WRITE	8
+#define KUAP_CURRENT	(KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)

 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/kup-radix.h>
@@ -101,6 +103,16 @@ static inline void prevent_current_access_user(void)
 	prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
 }

+static inline void prevent_current_read_from_user(void)
+{
+	prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ);
+}
+
+static inline void prevent_current_write_to_user(void)
+{
+	prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
+}
+
 #endif /* !__ASSEMBLY__ */

 #endif /* _ASM_POWERPC_KUAP_H_ */

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 42b6c44e36a7..62cc8d7640ec 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -532,6 +532,28 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
 #define user_access_save	prevent_user_access_return
 #define user_access_restore	restore_user_access

+static __must_check inline bool
+user_read_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return false;
+	allow_read_from_user(ptr, len);
+	return true;
+}
+#define user_read_access_begin	user_read_access_begin
+#define user_read_access_end	prevent_current_read_from_user
+
+static __must_check inline bool
+user_write_access_begin(const void __user *ptr, size_t len)
+{
+	if (unlikely(!access_ok(ptr, len)))
+		return false;
+	allow_write_to_user((void __user *)ptr, len);
+	return true;
+}
+#define user_write_access_begin	user_write_access_begin
+#define user_write_access_end	prevent_current_write_to_user
+
 #define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
 #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
 #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)

From e2a8b49e79553bd8ec48f73cead84e6146c09408 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Thu, 7 May 2020 22:33:24 +1000
Subject: [PATCH 8/8] powerpc/uaccess: Don't use "m<>" constraint

The "m<>" constraint breaks compilation with GCC 4.6.x era compilers.

The use of the constraint allows the compiler to use update-form
instructions. However, in practice current compilers never generate
those forms for any of the current uses of __put_user_asm_goto().

We anticipate that GCC 4.6 will be declared unsupported for building
the kernel in the not too distant future. So for now just switch to
the "m" constraint.

Fixes: 334710b1496a ("powerpc/uaccess: Implement unsafe_put_user() using 'asm goto'")
Signed-off-by: Michael Ellerman
Acked-by: Segher Boessenkool
Link: https://lore.kernel.org/r/20200507123324.2250024-1-mpe@ellerman.id.au
---
 arch/powerpc/include/asm/uaccess.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 62cc8d7640ec..164112007f54 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -210,7 +210,7 @@ do {								\
 		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
 		EX_TABLE(1b, %l2)				\
 		:						\
-		: "r" (x), "m<>" (*addr)			\
+		: "r" (x), "m" (*addr)				\
 		:						\
 		: label)