Use the new batched user accesses in generic user string handling
This converts the generic user string functions to use the batched user access functions.

It makes a big difference on Skylake, which is the first x86 microarchitecture to implement SMAP. The STAC/CLAC instructions are not very fast, and doing them for each access inside the loop that copies strings from user space (which is what the pathname handling does for every pathname the kernel uses, for example) is very inefficient.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9fd4470ff4
parent 5b24a7a2aa
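Note (not part of the commit): a minimal sketch of what the batching buys, written against the helpers as they appear in the diff below, where user_access_begin()/user_access_end() take no arguments and unsafe_get_user() returns non-zero on fault (these interfaces have changed shape in later kernels). The function names here are invented for illustration only.

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Unbatched: __get_user() issues a STAC/CLAC pair around every access. */
static long copy_str_unbatched(char *dst, const char __user *src, long max)
{
	long res;

	for (res = 0; res < max; res++) {
		char c;

		if (unlikely(__get_user(c, src + res)))	/* STAC ... CLAC each time */
			return -EFAULT;
		dst[res] = c;
		if (!c)
			break;
	}
	return res;
}

/* Batched: one STAC/CLAC pair brackets the whole loop, as this patch does. */
static long copy_str_batched(char *dst, const char __user *src, long max)
{
	long res;

	/*
	 * The caller must have range-checked src/max already; unsafe_get_user()
	 * does no checking of its own. Compare the src_addr < max_addr test
	 * that stays outside the batched region in the diff below.
	 */
	user_access_begin();				/* STAC once */
	for (res = 0; res < max; res++) {
		char c;

		if (unlikely(unsafe_get_user(c, src + res)))
			goto efault;
		dst[res] = c;
		if (!c)
			break;
	}
	user_access_end();				/* CLAC once */
	return res;

efault:
	user_access_end();
	return -EFAULT;
}

The diff applies exactly this shape: the word-at-a-time and byte-at-a-time loops switch to unsafe_get_user(), and the exported wrappers gain the user_access_begin()/user_access_end() bracket.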
lib/strncpy_from_user.c

@@ -39,7 +39,7 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
 		unsigned long c, data;
 
 		/* Fall back to byte-at-a-time if we get a page fault */
-		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+		if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
 			break;
 		*(unsigned long *)(dst+res) = c;
 		if (has_zero(c, &data, &constants)) {
@@ -55,7 +55,7 @@ byte_at_a_time:
 	while (max) {
 		char c;
 
-		if (unlikely(__get_user(c,src+res)))
+		if (unlikely(unsafe_get_user(c,src+res)))
 			return -EFAULT;
 		dst[res] = c;
 		if (!c)
@@ -107,7 +107,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 	src_addr = (unsigned long)src;
 	if (likely(src_addr < max_addr)) {
 		unsigned long max = max_addr - src_addr;
-		return do_strncpy_from_user(dst, src, count, max);
+		long retval;
+
+		user_access_begin();
+		retval = do_strncpy_from_user(dst, src, count, max);
+		user_access_end();
+		return retval;
 	}
 	return -EFAULT;
 }
lib/strnlen_user.c

@@ -45,7 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 	src -= align;
 	max += align;
 
-	if (unlikely(__get_user(c,(unsigned long __user *)src)))
+	if (unlikely(unsafe_get_user(c,(unsigned long __user *)src)))
 		return 0;
 	c |= aligned_byte_mask(align);
 
@@ -61,7 +61,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 		if (unlikely(max <= sizeof(unsigned long)))
 			break;
 		max -= sizeof(unsigned long);
-		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+		if (unlikely(unsafe_get_user(c,(unsigned long __user *)(src+res))))
 			return 0;
 	}
 	res -= align;
@@ -112,7 +112,12 @@ long strnlen_user(const char __user *str, long count)
 	src_addr = (unsigned long)str;
 	if (likely(src_addr < max_addr)) {
 		unsigned long max = max_addr - src_addr;
-		return do_strnlen_user(str, count, max);
+		long retval;
+
+		user_access_begin();
+		retval = do_strnlen_user(str, count, max);
+		user_access_end();
+		return retval;
 	}
 	return 0;
 }
@@ -141,7 +146,12 @@ long strlen_user(const char __user *str)
 	src_addr = (unsigned long)str;
 	if (likely(src_addr < max_addr)) {
 		unsigned long max = max_addr - src_addr;
-		return do_strnlen_user(str, ~0ul, max);
+		long retval;
+
+		user_access_begin();
+		retval = do_strnlen_user(str, ~0ul, max);
+		user_access_end();
+		return retval;
 	}
 	return 0;
 }
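Usage note (not part of the commit): callers of these functions do not change; the batching is internal to the lib/ helpers. A hypothetical caller in the spirit of the pathname handling mentioned in the commit message might look like this (copy_pathname and its buffer handling are invented for illustration, not taken from the kernel):

#include <linux/errno.h>
#include <linux/uaccess.h>

/*
 * Copy a NUL-terminated pathname from user space into a kernel buffer of
 * size len. The single strncpy_from_user() call now costs one STAC/CLAC
 * pair instead of one pair per word or byte copied.
 */
static long copy_pathname(char *kbuf, const char __user *upath, long len)
{
	long copied = strncpy_from_user(kbuf, upath, len);

	if (copied < 0)
		return copied;		/* -EFAULT */
	if (copied == len)
		return -ENAMETOOLONG;	/* no NUL found within len bytes */
	return copied;			/* string length; kbuf is NUL-terminated */
}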