#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user() the destination (to) always points to kernel
 * memory and no faults on store should happen.  Interpretation of from is
 * affected by set_fs().  For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
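
/*
 * Illustrative sketch only (not part of this API): one way a hypothetical
 * architecture could satisfy the raw_copy_from_user() contract above is a
 * byte-wise loop built on its __get_user() primitive, returning the number
 * of bytes left uncopied.  Real implementations use optimized assembly;
 * this just spells out the semantics as code.
 *
 *      static inline unsigned long
 *      raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 *      {
 *              char *d = to;
 *              const char __user *s = from;
 *
 *              while (n) {
 *                      char c;
 *
 *                      if (__get_user(c, s))
 *                              break;          // fault: report what is left
 *                      *d++ = c;
 *                      s++;
 *                      n--;
 *              }
 *              return n;                       // 0 => everything copied
 *      }
 */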

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_fault();
        kasan_check_write(to, n);
        check_object_size(to, n, false);
        return raw_copy_from_user(to, from, n);
}
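
/*
 * Hedged usage sketch (names such as uptr and st are illustrative, not from
 * this header): the __ variants above do no access_ok() check and no
 * zero-padding, so a caller must have validated the range and must check
 * the return value:
 *
 *      if (!access_ok(VERIFY_READ, uptr, sizeof(st)))
 *              return -EFAULT;
 *      if (__copy_from_user(&st, uptr, sizeof(st)))
 *              return -EFAULT;         // short copy: st may be partially filled
 */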

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we do not take a page fault and sleep.
 */
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        kasan_check_read(from, n);
        check_object_size(from, n, true);
        return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        might_fault();
        if (likely(access_ok(VERIFY_READ, from, n))) {
                kasan_check_write(to, n);
                res = raw_copy_from_user(to, from, n);
        }
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, to, n)) {
                kasan_check_read(from, n);
                n = raw_copy_to_user(to, from, n);
        }
        return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (likely(check_copy_size(to, n, false)))
                n = _copy_from_user(to, from, n);
        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true)))
                n = _copy_to_user(to, from, n);
        return n;
}
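
/*
 * Hedged usage sketch (struct foo_args and uarg are illustrative, not from
 * this header): copy_from_user()/copy_to_user() do their own access_ok()
 * check and return the number of bytes that could NOT be copied, so callers
 * only need to test the result for non-zero:
 *
 *      struct foo_args args;
 *
 *      if (copy_from_user(&args, uarg, sizeof(args)))
 *              return -EFAULT;
 *      // ... operate on args ...
 *      if (copy_to_user(uarg, &args, sizeof(args)))
 *              return -EFAULT;
 */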

#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
                n = raw_copy_in_user(to, from, n);
        return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
        current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
        current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
        pagefault_disabled_inc();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
        pagefault_disabled_dec();
}
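
/*
 * Hedged usage sketch (illustrative only): disabling pagefaults lets code
 * that may run in atomic context attempt a user access without sleeping;
 * a fault then just makes the copy return a non-zero "bytes left" count
 * instead of being handled:
 *
 *      pagefault_disable();
 *      ret = __copy_from_user_inatomic(dst, uaddr, len);
 *      pagefault_enable();
 *      if (ret)
 *              // fall back to a sleeping copy_from_user() outside the
 *              // atomic section, or fail with -EFAULT
 */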

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
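
/*
 * Hedged usage sketch (illustrative only): architecture page-fault handlers
 * typically bail out to the exception-fixup path when faults must not be
 * handled, along the lines of:
 *
 *      if (unlikely(faulthandler_disabled() || !mm)) {
 *              // no_context: search the exception tables / oops
 *      }
 */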

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);

/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval) \
        probe_kernel_read(&retval, addr, sizeof(retval))
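
/*
 * Hedged usage sketch (illustrative only): probing a possibly-bogus kernel
 * pointer without risking an oops, e.g. when dumping state from a debug or
 * crash path:
 *
 *      unsigned long word;
 *
 *      if (probe_kernel_address(addr, word))
 *              pr_cont(" (bad address)");
 *      else
 *              pr_cont(" %08lx", word);
 */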

#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
/*
 * unsafe_{get,put}_user() take an error target label rather than returning
 * 0 or -EFAULT the way the traditional __{get,put}_user() do.  The
 * return-value interface is easy to use, but it is bad for code generation:
 * the error handling is already implemented internally with an exception
 * handler, yet every caller is forced to check an error value after each
 * access.  Passing a label lets an error during the user access jump
 * straight to the caller's error handling, so instead of
 *
 *      if (unsafe_get_user(x, ptr))
 *              ... handle error ...
 *
 * the interface is
 *
 *      unsafe_get_user(x, ptr, label);
 *
 * The generic fallbacks below still expand to "if (err) goto label", but
 * architectures can implement unsafe_put_user() with the exception-table
 * "asm goto" model directly.  unsafe_get_user() is harder to convert,
 * because current versions of gcc do not allow "asm goto" (for the
 * exception) together with output operands (for the value being fetched);
 * it might also eventually be changed to return the fetched value.
 */
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
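
/*
 * Hedged usage sketch (uptr, lo, hi and the label name are illustrative
 * only): a batched fetch of two words using the unsafe accessors.  The
 * caller performs access_ok() once, brackets the accesses with
 * user_access_begin()/user_access_end(), and a faulting access jumps to
 * the supplied label:
 *
 *      if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *              return -EFAULT;
 *      user_access_begin();
 *      unsafe_get_user(lo, &uptr[0], Efault);
 *      unsafe_get_user(hi, &uptr[1], Efault);
 *      user_access_end();
 *      return 0;
 * Efault:
 *      user_access_end();
 *      return -EFAULT;
 */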

#endif /* __LINUX_UACCESS_H__ */