2008-10-23 13:26:29 +08:00
|
|
|
#ifndef _ASM_X86_PAGE_32_H
|
|
|
|
#define _ASM_X86_PAGE_32_H
|
2008-01-30 20:32:44 +08:00
|
|
|
|
2009-02-09 14:52:14 +08:00
|
|
|
#include <asm/page_32_types.h>
|
2008-02-09 07:15:06 +08:00
|
|
|
|
2009-02-13 20:36:47 +08:00
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
2008-01-30 20:32:44 +08:00
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
|
|
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
|
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-10-03 23:54:25 +08:00
|
|
|
/*
 * Kernel-virtual -> physical address translation for 32-bit x86:
 * lowmem is a simple linear offset from PAGE_OFFSET.
 */
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
/* Out-of-line version (defined elsewhere); with CONFIG_DEBUG_VIRTUAL it
 * can validate the virtual address before translating. */
extern unsigned long __phys_addr(unsigned long);
#else
/* No debug checking: translate directly. */
#define __phys_addr(x) __phys_addr_nodebug(x)
#endif
/* Kernel symbols live in the linear map on 32-bit, so symbol addresses
 * translate the same way as any other lowmem address. */
#define __phys_addr_symbol(x) __phys_addr(x)
/* RELOC_HIDE with offset 0: hides the value from the compiler so it
 * cannot make aliasing/provenance assumptions about the result. */
#define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
|
|
|
|
|
|
|
|
#ifdef CONFIG_FLATMEM
|
|
|
|
#define pfn_valid(pfn) ((pfn) < max_mapnr)
|
|
|
|
#endif /* CONFIG_FLATMEM */
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_USE_3DNOW
|
|
|
|
#include <asm/mmx.h>
|
[PATCH] vdso: randomize the i386 vDSO by moving it into a vma
Move the i386 VDSO down into a vma and thus randomize it.
Besides the security implications, this feature also helps debuggers, which
can COW a vma-backed VDSO just like a normal DSO and can thus do
single-stepping and other debugging features.
It's good for hypervisors (Xen, VMWare) too, which typically live in the same
high-mapped address space as the VDSO, hence whenever the VDSO is used, they
get lots of guest pagefaults and have to fix such guest accesses up - which
slows things down instead of speeding things up (the primary purpose of the
VDSO).
There's a new CONFIG_COMPAT_VDSO (default=y) option, which provides support
for older glibcs that still rely on a prelinked high-mapped VDSO. Newer
distributions (using glibc 2.3.3 or later) can turn this option off. Turning
it off is also recommended for security reasons: attackers cannot use the
predictable high-mapped VDSO page as syscall trampoline anymore.
There is a new vdso=[0|1] boot option as well, and a runtime
/proc/sys/vm/vdso_enabled sysctl switch, that allows the VDSO to be turned
on/off.
(This version of the VDSO-randomization patch also has working ELF
coredumping, the previous patch crashed in the coredumping code.)
This code is a combined work of the exec-shield VDSO randomization
code and Gerd Hoffmann's hypervisor-centric VDSO patch. Rusty Russell
started this patch and i completed it.
[akpm@osdl.org: cleanups]
[akpm@osdl.org: compile fix]
[akpm@osdl.org: compile fix 2]
[akpm@osdl.org: compile fix 3]
[akpm@osdl.org: revert MAXMEM change]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Cc: Gerd Hoffmann <kraxel@suse.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-27 17:53:50 +08:00
|
|
|
|
2008-01-30 20:32:44 +08:00
|
|
|
/*
 * clear_page - zero the page at @page.
 *
 * CONFIG_X86_USE_3DNOW variant: delegates to mmx_clear_page() from
 * <asm/mmx.h>.  Presumably clears PAGE_SIZE bytes, mirroring the
 * memset() fallback below -- confirm against <asm/mmx.h>.
 */
static inline void clear_page(void *page)
{
	mmx_clear_page(page);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-30 20:32:44 +08:00
|
|
|
/*
 * copy_page - copy the page at @from to the page at @to.
 *
 * CONFIG_X86_USE_3DNOW variant: delegates to mmx_copy_page() from
 * <asm/mmx.h>.  Presumably copies PAGE_SIZE bytes, mirroring the
 * memcpy() fallback below -- confirm against <asm/mmx.h>.
 */
static inline void copy_page(void *to, void *from)
{
	mmx_copy_page(to, from);
}
|
|
|
|
#else /* !CONFIG_X86_USE_3DNOW */
|
|
|
|
#include <linux/string.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-01-30 20:32:44 +08:00
|
|
|
static inline void clear_page(void *page)
|
|
|
|
{
|
|
|
|
memset(page, 0, PAGE_SIZE);
|
|
|
|
}
|
2005-09-04 06:54:30 +08:00
|
|
|
|
2008-01-30 20:32:44 +08:00
|
|
|
static inline void copy_page(void *to, void *from)
|
|
|
|
{
|
|
|
|
memcpy(to, from, PAGE_SIZE);
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_X86_USE_3DNOW */
|
|
|
|
#endif /* !__ASSEMBLY__ */
|
2006-04-27 22:48:08 +08:00
|
|
|
|
2008-10-23 13:26:29 +08:00
|
|
|
#endif /* _ASM_X86_PAGE_32_H */
|