/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

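/*
 * __clear_user - zero a range of userspace memory
 *
 * Returns the number of bytes that could not be cleared (0 on success).
 * The caller is expected to have validated the range with access_ok();
 * clear_user() below is the checking wrapper.
 */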
unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   %b[zero],(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);
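
/*
 * clear_user - checking wrapper: validate the range with access_ok()
 * before handing it to __clear_user(). Returns the number of bytes
 * not cleared.
 */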
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

/*
 * Try to copy last bytes and clear the rest if needed.
 * Since protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	clac();
	return len;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
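
/*
 * __copy_user_flushcache - copy from user space and write the data back
 * past the CPU caches
 *
 * The bulk of the copy is done with __copy_user_nocache() (non-temporal
 * stores); any cache lines touched by an unaligned head or tail of the
 * buffer are then explicitly written back with clean_cache_range(), so
 * the whole destination range ends up flushed.
 */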
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, 1);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
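
/*
 * memcpy_flushcache - kernel-to-kernel copy that leaves the destination
 * written back past the CPU caches
 *
 * The 8-byte-aligned bulk of the copy is done with non-temporal movnti
 * stores; the unaligned head and tail fall back to memcpy() followed by
 * clean_cache_range(). Useful when the data must end up durable in
 * memory (e.g. persistent memory) rather than merely in cache.
 */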
void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
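
/*
 * memcpy_page_flushcache - flushcache copy out of a (possibly highmem)
 * page: map it with kmap_atomic() and hand the range to
 * memcpy_flushcache().
 */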
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif