Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "A number of fixes and some late updates:

   - make in_compat_syscall() behavior on x86-32 similar to other
     platforms; this touches a number of generic files but is not
     intended to impact non-x86 platforms.

   - objtool fixes

   - PAT preemption fix

   - paravirt fixes/cleanups

   - cpufeatures updates for new instructions

   - earlyprintk quirk

   - make microcode version in sysfs world-readable (it is already
     world-readable in procfs)

   - minor cleanups and fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  compat: Cleanup in_compat_syscall() callers
  x86/compat: Adjust in_compat_syscall() to generic code under !COMPAT
  objtool: Support GCC 9 cold subfunction naming scheme
  x86/numa_emulation: Fix uniform-split numa emulation
  x86/paravirt: Remove unused _paravirt_ident_32
  x86/mm/pat: Disable preemption around __flush_tlb_all()
  x86/paravirt: Remove GPL from pv_ops export
  x86/traps: Use format string with panic() call
  x86: Clean up 'sizeof x' => 'sizeof(x)'
  x86/cpufeatures: Enumerate MOVDIR64B instruction
  x86/cpufeatures: Enumerate MOVDIRI instruction
  x86/earlyprintk: Add a force option for pciserial device
  objtool: Support per-function rodata sections
  x86/microcode: Make revision and processor flags world-readable
commit 601a88077c
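Before the diff, a reading aid for the headline change, the in_compat_syscall() rework. This is a minimal sketch of the post-merge semantics, assuming the definitions in the x86 compat header hunk below (the in_ia32_syscall() and in_x32_syscall() helpers are taken as given):

	/* True for any 32-bit syscall, whether ia32 or x32. */
	static inline bool in_32bit_syscall(void)
	{
		return in_ia32_syscall() || in_x32_syscall();
	}

	#ifdef CONFIG_COMPAT
	/*
	 * "Compat" now means a 64-bit kernel emulating a 32-bit task; a
	 * native 32-bit kernel instead falls through to the generic stub
	 * that returns false (see the generic compat header hunk below).
	 */
	static inline bool in_compat_syscall(void)
	{
		return in_32bit_syscall();
	}
	#define in_compat_syscall in_compat_syscall	/* override the generic impl */
	#endif

Callers that mean "any 32-bit syscall" (such as the x86 mmap and hugetlb code) are switched to in_32bit_syscall(); callers that mean "compat emulation" keep in_compat_syscall().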
@@ -1068,7 +1068,7 @@
 			earlyprintk=serial[,0x...[,baudrate]]
 			earlyprintk=ttySn[,baudrate]
 			earlyprintk=dbgp[debugController#]
-			earlyprintk=pciserial,bus:device.function[,baudrate]
+			earlyprintk=pciserial[,force],bus:device.function[,baudrate]
 			earlyprintk=xdbc[xhciController#]

 			earlyprintk is useful when the kernel crashes before

@@ -1100,6 +1100,10 @@

 			The sclp output can only be used on s390.

+			The optional "force" to "pciserial" enables use of a
+			PCI device even when its classcode is not of the
+			UART class.
+
 	edac_report=	[HW,EDAC]	Control how to report EDAC event
 			Format: {"on" | "off" | "force"}
 			on: enable EDAC to report H/W event. May be overridden
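As a usage illustration of the new option (the bus:device.function location and the baud rate below are made-up values):

	earlyprintk=pciserial,force,00:18.1,115200

Without "force", a device whose PCI class code is not a 16550-class UART is rejected; with it, initialization proceeds anyway.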
@@ -113,7 +113,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
 {
 	int err;

-	memset(&cpu.flags, 0, sizeof cpu.flags);
+	memset(&cpu.flags, 0, sizeof(cpu.flags));
 	cpu.level = 3;

 	if (has_eflag(X86_EFLAGS_AC))
@@ -50,7 +50,7 @@ static void parse_earlyprintk(void)
 	int pos = 0;
 	int port = 0;

-	if (cmdline_find_option("earlyprintk", arg, sizeof arg) > 0) {
+	if (cmdline_find_option("earlyprintk", arg, sizeof(arg)) > 0) {
 		char *e;

 		if (!strncmp(arg, "serial", 6)) {
@@ -124,7 +124,7 @@ static void parse_console_uart8250(void)
 	 * console=uart8250,io,0x3f8,115200n8
 	 * need to make sure it is last one console !
 	 */
-	if (cmdline_find_option("console", optstr, sizeof optstr) <= 0)
+	if (cmdline_find_option("console", optstr, sizeof(optstr)) <= 0)
 		return;

 	options = optstr;
@@ -76,7 +76,7 @@ static int get_edd_info(u8 devno, struct edd_info *ei)
 {
 	struct biosregs ireg, oreg;

-	memset(ei, 0, sizeof *ei);
+	memset(ei, 0, sizeof(*ei));

 	/* Check Extensions Present */

@@ -133,7 +133,7 @@ void query_edd(void)
 	struct edd_info ei, *edp;
 	u32 *mbrptr;

-	if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) {
+	if (cmdline_find_option("edd", eddarg, sizeof(eddarg)) > 0) {
 		if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) {
 			do_edd = 1;
 			do_mbr = 0;
@@ -166,7 +166,7 @@ void query_edd(void)
 		 */
 		if (!get_edd_info(devno, &ei)
 		    && boot_params.eddbuf_entries < EDDMAXNR) {
-			memcpy(edp, &ei, sizeof ei);
+			memcpy(edp, &ei, sizeof(ei));
 			edp++;
 			boot_params.eddbuf_entries++;
 		}
@@ -36,8 +36,8 @@ static void copy_boot_params(void)
 	const struct old_cmdline * const oldcmd =
 		(const struct old_cmdline *)OLD_CL_ADDRESS;

-	BUILD_BUG_ON(sizeof boot_params != 4096);
-	memcpy(&boot_params.hdr, &hdr, sizeof hdr);
+	BUILD_BUG_ON(sizeof(boot_params) != 4096);
+	memcpy(&boot_params.hdr, &hdr, sizeof(hdr));

 	if (!boot_params.hdr.cmd_line_ptr &&
 	    oldcmd->cl_magic == OLD_CL_MAGIC) {
@@ -26,7 +26,7 @@ static int detect_memory_e820(void)

 	initregs(&ireg);
 	ireg.ax  = 0xe820;
-	ireg.cx  = sizeof buf;
+	ireg.cx  = sizeof(buf);
 	ireg.edx = SMAP;
 	ireg.di  = (size_t)&buf;

@@ -21,7 +21,7 @@

 void initregs(struct biosregs *reg)
 {
-	memset(reg, 0, sizeof *reg);
+	memset(reg, 0, sizeof(*reg));
 	reg->eflags |= X86_EFLAGS_CF;
 	reg->ds = ds();
 	reg->es = ds();
@@ -62,7 +62,7 @@ static int vesa_probe(void)
 		if (mode & ~0x1ff)
 			continue;

-		memset(&vminfo, 0, sizeof vminfo); /* Just in case... */
+		memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */

 		ireg.ax = 0x4f01;
 		ireg.cx = mode;
@@ -109,7 +109,7 @@ static int vesa_set_mode(struct mode_info *mode)
 	int is_graphic;
 	u16 vesa_mode = mode->mode - VIDEO_FIRST_VESA;

-	memset(&vminfo, 0, sizeof vminfo); /* Just in case... */
+	memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */

 	initregs(&ireg);
 	ireg.ax = 0x4f01;
@@ -241,7 +241,7 @@ void vesa_store_edid(void)
 	struct biosregs ireg, oreg;

 	/* Apparently used as a nonsense token... */
-	memset(&boot_params.edid_info, 0x13, sizeof boot_params.edid_info);
+	memset(&boot_params.edid_info, 0x13, sizeof(boot_params.edid_info));

 	if (vginfo.version < 0x0200)
 		return;		/* EDID requires VBE 2.0+ */
@@ -115,7 +115,7 @@ static unsigned int get_entry(void)
 		} else if ((key >= '0' && key <= '9') ||
 			   (key >= 'A' && key <= 'Z') ||
 			   (key >= 'a' && key <= 'z')) {
-			if (len < sizeof entry_buf) {
+			if (len < sizeof(entry_buf)) {
 				entry_buf[len++] = key;
 				putchar(key);
 			}
@@ -4535,7 +4535,7 @@ __init int intel_pmu_init(void)
 		}
 	}

-	snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name);
+	snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);

 	if (version >= 2 && extra_attr) {
 		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
@@ -217,11 +217,18 @@ static inline bool in_x32_syscall(void)
 	return false;
 }

-static inline bool in_compat_syscall(void)
+static inline bool in_32bit_syscall(void)
 {
 	return in_ia32_syscall() || in_x32_syscall();
 }

+#ifdef CONFIG_COMPAT
+static inline bool in_compat_syscall(void)
+{
+	return in_32bit_syscall();
+}
+#define in_compat_syscall in_compat_syscall /* override the generic impl */
+#endif
+
 struct compat_siginfo;
 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
@@ -331,6 +331,8 @@
 #define X86_FEATURE_LA57		(16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID		(16*32+22) /* RDPID instruction */
 #define X86_FEATURE_CLDEMOTE		(16*32+25) /* CLDEMOTE instruction */
+#define X86_FEATURE_MOVDIRI		(16*32+27) /* MOVDIRI instruction */
+#define X86_FEATURE_MOVDIR64B		(16*32+28) /* MOVDIR64B instruction */

 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV	(17*32+ 0) /* MCA overflow recovery support */
@@ -76,9 +76,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
 #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
 static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 {
-	if (in_compat_syscall())
-		return true;
-	return false;
+	return in_32bit_syscall();
 }
 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
 #endif /* !COMPILE_OFFSETS */
@@ -361,7 +361,6 @@ extern struct paravirt_patch_template pv_ops;
 	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
 	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))

-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
 unsigned paravirt_patch_default(u8 type, void *insnbuf,
 				unsigned long addr, unsigned len);

@@ -651,7 +650,6 @@ void paravirt_leave_lazy_mmu(void);
 void paravirt_flush_lazy_mmu(void);

 void _paravirt_nop(void);
-u32 _paravirt_ident_32(u32);
 u64 _paravirt_ident_64(u64);

 #define paravirt_nop	((void *)_paravirt_nop)
@@ -453,6 +453,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr)
  */
 static inline void __flush_tlb_all(void)
 {
+	/*
+	 * This is to catch users with enabled preemption and the PGE feature
+	 * and don't trigger the warning in __native_flush_tlb().
+	 */
+	VM_WARN_ON_ONCE(preemptible());
+
 	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		__flush_tlb_global();
 	} else {
@@ -1074,7 +1074,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;

-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
 	c->extended_cpuid_level = 0;

 	if (!have_cpuid_p())

@@ -1317,7 +1317,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_virt_bits = 32;
 #endif
 	c->x86_cache_alignment = c->x86_clflush_size;
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+	memset(&c->x86_capability, 0, sizeof(c->x86_capability));

 	generic_identify(c);

@@ -2215,7 +2215,7 @@ static int mce_device_create(unsigned int cpu)
 	if (dev)
 		return 0;

-	dev = kzalloc(sizeof *dev, GFP_KERNEL);
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev)
 		return -ENOMEM;
 	dev->id = cpu;
@@ -666,8 +666,8 @@ static ssize_t pf_show(struct device *dev,
 }

 static DEVICE_ATTR_WO(reload);
-static DEVICE_ATTR(version, 0400, version_show, NULL);
-static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
+static DEVICE_ATTR(version, 0444, version_show, NULL);
+static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);

 static struct attribute *mc_default_attrs[] = {
 	&dev_attr_version.attr,
@@ -798,7 +798,7 @@ static void generic_set_all(void)
 	local_irq_restore(flags);

 	/* Use the atomic bitops to update the global mask */
-	for (count = 0; count < sizeof mask * 8; ++count) {
+	for (count = 0; count < sizeof(mask) * 8; ++count) {
 		if (mask & 0x01)
 			set_bit(count, &smp_changes_mask);
 		mask >>= 1;
@@ -174,12 +174,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 	case MTRRIOC_SET_PAGE_ENTRY:
 	case MTRRIOC_DEL_PAGE_ENTRY:
 	case MTRRIOC_KILL_PAGE_ENTRY:
-		if (copy_from_user(&sentry, arg, sizeof sentry))
+		if (copy_from_user(&sentry, arg, sizeof(sentry)))
 			return -EFAULT;
 		break;
 	case MTRRIOC_GET_ENTRY:
 	case MTRRIOC_GET_PAGE_ENTRY:
-		if (copy_from_user(&gentry, arg, sizeof gentry))
+		if (copy_from_user(&gentry, arg, sizeof(gentry)))
 			return -EFAULT;
 		break;
 #ifdef CONFIG_COMPAT

@@ -332,7 +332,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 	switch (cmd) {
 	case MTRRIOC_GET_ENTRY:
 	case MTRRIOC_GET_PAGE_ENTRY:
-		if (copy_to_user(arg, &gentry, sizeof gentry))
+		if (copy_to_user(arg, &gentry, sizeof(gentry)))
 			err = -EFAULT;
 		break;
 #ifdef CONFIG_COMPAT
@@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset)
  * early_pci_serial_init()
  *
  * This function is invoked when the early_printk param starts with "pciserial"
- * The rest of the param should be ",B:D.F,baud" where B, D & F describe the
- * location of a PCI device that must be a UART device.
+ * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe
+ * the location of a PCI device that must be a UART device. "force" is optional
+ * and overrides the use of an UART device with a wrong PCI class code.
  */
 static __init void early_pci_serial_init(char *s)
 {

@@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s)
 	u32 classcode, bar0;
 	u16 cmdreg;
 	char *e;
+	int force = 0;

-	/*
-	 * First, part the param to get the BDF values
-	 */
 	if (*s == ',')
 		++s;

 	if (*s == 0)
 		return;

+	/* Force the use of an UART device with wrong class code */
+	if (!strncmp(s, "force,", 6)) {
+		force = 1;
+		s += 6;
+	}
+
+	/*
+	 * Part the param to get the BDF values
+	 */
 	bus = (u8)simple_strtoul(s, &e, 16);
 	s = e;
 	if (*s != ':')

@@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s)
 	s++;

 	/*
-	 * Second, find the device from the BDF
+	 * Find the device from the BDF
 	 */
 	cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND);
 	classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);

@@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s)
 	 */
 	if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) &&
 	     (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) ||
-	   (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */
+	   (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ {
+		if (!force)
 		return;
+	}

 	/*
 	 * Determine if it is IO or memory mapped

@@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s)
 	}

 	/*
-	 * Lastly, initialize the hardware
+	 * Initialize the hardware
 	 */
 	if (*s) {
 		if (strcmp(s, "nocfg") == 0)
@@ -385,7 +385,7 @@ static void __init copy_bootdata(char *real_mode_data)
 	 */
 	sme_map_bootdata(real_mode_data);

-	memcpy(&boot_params, real_mode_data, sizeof boot_params);
+	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
 	sanitize_boot_params(&boot_params);
 	cmd_line_ptr = get_cmd_line_ptr();
 	if (cmd_line_ptr) {
@@ -115,14 +115,14 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
 			err = -EBADF;
 			break;
 		}
-		if (copy_from_user(&regs, uregs, sizeof regs)) {
+		if (copy_from_user(&regs, uregs, sizeof(regs))) {
 			err = -EFAULT;
 			break;
 		}
 		err = rdmsr_safe_regs_on_cpu(cpu, regs);
 		if (err)
 			break;
-		if (copy_to_user(uregs, &regs, sizeof regs))
+		if (copy_to_user(uregs, &regs, sizeof(regs)))
 			err = -EFAULT;
 		break;

@@ -131,14 +131,14 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
 			err = -EBADF;
 			break;
 		}
-		if (copy_from_user(&regs, uregs, sizeof regs)) {
+		if (copy_from_user(&regs, uregs, sizeof(regs))) {
 			err = -EFAULT;
 			break;
 		}
 		err = wrmsr_safe_regs_on_cpu(cpu, regs);
 		if (err)
 			break;
-		if (copy_to_user(uregs, &regs, sizeof regs))
+		if (copy_to_user(uregs, &regs, sizeof(regs)))
 			err = -EFAULT;
 		break;

@@ -56,17 +56,6 @@ asm (".pushsection .entry.text, \"ax\"\n"
      ".type _paravirt_nop, @function\n\t"
      ".popsection");

-/* identity function, which can be inlined */
-u32 notrace _paravirt_ident_32(u32 x)
-{
-	return x;
-}
-
-u64 notrace _paravirt_ident_64(u64 x)
-{
-	return x;
-}
-
 void __init default_banner(void)
 {
 	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",

@@ -102,6 +91,12 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target,
 }

 #ifdef CONFIG_PARAVIRT_XXL
+/* identity function, which can be inlined */
+u64 notrace _paravirt_ident_64(u64 x)
+{
+	return x;
+}
+
 static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
 				   unsigned long addr, unsigned len)
 {

@@ -146,13 +141,11 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf,
 	else if (opfunc == _paravirt_nop)
 		ret = 0;

+#ifdef CONFIG_PARAVIRT_XXL
 	/* identity functions just return their single argument */
-	else if (opfunc == _paravirt_ident_32)
-		ret = paravirt_patch_ident_32(insnbuf, len);
 	else if (opfunc == _paravirt_ident_64)
 		ret = paravirt_patch_ident_64(insnbuf, len);

-#ifdef CONFIG_PARAVIRT_XXL
 	else if (type == PARAVIRT_PATCH(cpu.iret) ||
 		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
 		/* If operation requires a jmp, then jmp */

@@ -309,13 +302,8 @@ struct pv_info pv_info = {
 #endif
 };

-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
-/* 32-bit pagetable entries */
-#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
-#else
 /* 64-bit pagetable entries */
 #define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
-#endif

 struct paravirt_patch_template pv_ops = {
 	/* Init ops. */

@@ -483,5 +471,5 @@ NOKPROBE_SYMBOL(native_set_debugreg);
 NOKPROBE_SYMBOL(native_load_idt);
 #endif

-EXPORT_SYMBOL_GPL(pv_ops);
+EXPORT_SYMBOL(pv_ops);
 EXPORT_SYMBOL_GPL(pv_info);
@@ -10,24 +10,18 @@ DEF_NATIVE(cpu, iret, "iret");
 DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
-#endif
-
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-#endif
-
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
-{
-	/* arg in %eax, return in %eax */
-	return 0;
-}

 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 {
 	/* arg in %edx:%eax, return in %edx:%eax */
 	return 0;
 }
+#endif
+
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
+#endif

 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
@@ -15,27 +15,19 @@ DEF_NATIVE(cpu, wbinvd, "wbinvd");

 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(cpu, swapgs, "swapgs");
-#endif
-
-DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");

-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-#endif
-
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
-{
-	return paravirt_patch_insns(insnbuf, len,
-				    start__mov32, end__mov32);
-}
-
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 {
 	return paravirt_patch_insns(insnbuf, len,
 				    start__mov64, end__mov64);
 }
+#endif
+
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
+#endif

 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
@@ -701,10 +701,10 @@ static void __set_personality_x32(void)
 	current->mm->context.ia32_compat = TIF_X32;
 	current->personality &= ~READ_IMPLIES_EXEC;
 	/*
-	 * in_compat_syscall() uses the presence of the x32 syscall bit
+	 * in_32bit_syscall() uses the presence of the x32 syscall bit
 	 * flag to determine compat status. The x86 mmap() code relies on
 	 * the syscall bitness so set x32 syscall bit right here to make
-	 * in_compat_syscall() work during exec().
+	 * in_32bit_syscall() work during exec().
 	 *
 	 * Pretend to come from a x32 execve.
 	 */
@@ -105,7 +105,7 @@ out:
 static void find_start_end(unsigned long addr, unsigned long flags,
 		unsigned long *begin, unsigned long *end)
 {
-	if (!in_compat_syscall() && (flags & MAP_32BIT)) {
+	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
 		/* This is usually used needed to map code in small
 		   model, so it needs to be in the first 31bit. Limit
 		   it to that. This means we need to move the

@@ -122,7 +122,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
 	}

 	*begin = get_mmap_base(1);
-	if (in_compat_syscall())
+	if (in_32bit_syscall())
 		*end = task_size_32bit();
 	else
 		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);

@@ -193,7 +193,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;

 	/* for MAP_32BIT mappings we force the legacy mmap base */
-	if (!in_compat_syscall() && (flags & MAP_32BIT))
+	if (!in_32bit_syscall() && (flags & MAP_32BIT))
 		goto bottomup;

 	/* requesting a specific address */

@@ -217,9 +217,10 @@ get_unmapped_area:
 	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
 	 * in the full address space.
 	 *
-	 * !in_compat_syscall() check to avoid high addresses for x32.
+	 * !in_32bit_syscall() check to avoid high addresses for x32
+	 * (and make it no op on native i386).
 	 */
-	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
+	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
 		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

 	info.align_mask = 0;
@@ -306,7 +306,7 @@ __visible void __noreturn handle_stack_overflow(const char *message,
 	die(message, regs, 0);

 	/* Be absolutely certain we don't return. */
-	panic(message);
+	panic("%s", message);
 }
 #endif

@@ -1509,7 +1509,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
 		return emulate_gp(ctxt, index << 3 | 0x2);

 	addr = dt.address + index * 8;
-	return linear_read_system(ctxt, addr, desc, sizeof *desc);
+	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
 }

 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,

@@ -1522,7 +1522,7 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 		struct desc_struct desc;
 		u16 sel;

-		memset (dt, 0, sizeof *dt);
+		memset(dt, 0, sizeof(*dt));
 		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
 				      VCPU_SREG_LDTR))
 			return;

@@ -1586,7 +1586,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

-	return linear_write_system(ctxt, addr, desc, sizeof *desc);
+	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
 }

 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,

@@ -1604,7 +1604,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	u16 dummy;
 	u32 base3 = 0;

-	memset(&seg_desc, 0, sizeof seg_desc);
+	memset(&seg_desc, 0, sizeof(seg_desc));

 	if (ctxt->mode == X86EMUL_MODE_REAL) {
 		/* set real mode segment descriptor (keep limit etc. for

@@ -3075,17 +3075,17 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 	int ret;
 	u32 new_tss_base = get_desc_base(new_desc);

-	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
 	if (ret != X86EMUL_CONTINUE)
 		return ret;

 	save_state_to_tss16(ctxt, &tss_seg);

-	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
 	if (ret != X86EMUL_CONTINUE)
 		return ret;

-	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
 	if (ret != X86EMUL_CONTINUE)
 		return ret;

@@ -3094,7 +3094,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,

 		ret = linear_write_system(ctxt, new_tss_base,
 					  &tss_seg.prev_task_link,
-					  sizeof tss_seg.prev_task_link);
+					  sizeof(tss_seg.prev_task_link));
 		if (ret != X86EMUL_CONTINUE)
 			return ret;
 	}

@@ -3216,7 +3216,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

-	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
 	if (ret != X86EMUL_CONTINUE)
 		return ret;

@@ -3228,7 +3228,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 	if (ret != X86EMUL_CONTINUE)
 		return ret;

-	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
 	if (ret != X86EMUL_CONTINUE)
 		return ret;

@@ -3237,7 +3237,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,

 		ret = linear_write_system(ctxt, new_tss_base,
 					  &tss_seg.prev_task_link,
-					  sizeof tss_seg.prev_task_link);
+					  sizeof(tss_seg.prev_task_link));
 		if (ret != X86EMUL_CONTINUE)
 			return ret;
 	}
@@ -2409,7 +2409,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	r = kvm_apic_state_fixup(vcpu, s, true);
 	if (r)
 		return r;
-	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
+	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

 	recalculate_apic_map(vcpu->kvm);
 	kvm_apic_set_version(vcpu);
@@ -2924,7 +2924,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
 	unsigned size;

 	r = -EFAULT;
-	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
+	if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
 		goto out;

 	r = -E2BIG;

@@ -3091,11 +3091,11 @@ long kvm_arch_dev_ioctl(struct file *filp,
 		unsigned n;

 		r = -EFAULT;
-		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
+		if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
 			goto out;
 		n = msr_list.nmsrs;
 		msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
-		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
+		if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
 			goto out;
 		r = -E2BIG;
 		if (n < msr_list.nmsrs)

@@ -3117,7 +3117,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
 		struct kvm_cpuid2 cpuid;

 		r = -EFAULT;
-		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
 			goto out;

 		r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,

@@ -3126,7 +3126,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
 			goto out;

 		r = -EFAULT;
-		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
+		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
 			goto out;
 		r = 0;
 		break;

@@ -3894,7 +3894,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		struct kvm_interrupt irq;

 		r = -EFAULT;
-		if (copy_from_user(&irq, argp, sizeof irq))
+		if (copy_from_user(&irq, argp, sizeof(irq)))
 			goto out;
 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 		break;

@@ -3912,7 +3912,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		struct kvm_cpuid cpuid;

 		r = -EFAULT;
-		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
 			goto out;
 		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
 		break;

@@ -3922,7 +3922,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		struct kvm_cpuid2 cpuid;

 		r = -EFAULT;
-		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
 			goto out;
 		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
 					      cpuid_arg->entries);

@@ -3933,14 +3933,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		struct kvm_cpuid2 cpuid;

 		r = -EFAULT;
-		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+		if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
 			goto out;
 		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
 					      cpuid_arg->entries);
 		if (r)
 			goto out;
 		r = -EFAULT;
-		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
+		if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
 			goto out;
 		r = 0;
 		break;

@@ -3961,13 +3961,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		struct kvm_tpr_access_ctl tac;

 		r = -EFAULT;
-		if (copy_from_user(&tac, argp, sizeof tac))
+		if (copy_from_user(&tac, argp, sizeof(tac)))
 			goto out;
 		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
 		if (r)
 			goto out;
 		r = -EFAULT;
-		if (copy_to_user(argp, &tac, sizeof tac))
+		if (copy_to_user(argp, &tac, sizeof(tac)))
 			goto out;
 		r = 0;
 		break;

@@ -3980,7 +3980,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (!lapic_in_kernel(vcpu))
 			goto out;
 		r = -EFAULT;
-		if (copy_from_user(&va, argp, sizeof va))
+		if (copy_from_user(&va, argp, sizeof(va)))
 			goto out;
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);

@@ -3991,7 +3991,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		u64 mcg_cap;

 		r = -EFAULT;
-		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
+		if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
 			goto out;
 		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
 		break;

@@ -4000,7 +4000,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		struct kvm_x86_mce mce;

 		r = -EFAULT;
-		if (copy_from_user(&mce, argp, sizeof mce))
+		if (copy_from_user(&mce, argp, sizeof(mce)))
 			goto out;
 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
 		break;

@@ -4536,7 +4536,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		if (kvm->created_vcpus)
 			goto set_identity_unlock;
 		r = -EFAULT;
-		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
+		if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
 			goto set_identity_unlock;
 		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
 set_identity_unlock:

@@ -4620,7 +4620,7 @@ set_identity_unlock:
 		if (r)
 			goto get_irqchip_out;
 		r = -EFAULT;
-		if (copy_to_user(argp, chip, sizeof *chip))
+		if (copy_to_user(argp, chip, sizeof(*chip)))
 			goto get_irqchip_out;
 		r = 0;
 	get_irqchip_out:

@@ -4666,7 +4666,7 @@ set_identity_unlock:
 	}
 	case KVM_SET_PIT: {
 		r = -EFAULT;
-		if (copy_from_user(&u.ps, argp, sizeof u.ps))
+		if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
 			goto out;
 		r = -ENXIO;
 		if (!kvm->arch.vpit)

@@ -8205,7 +8205,7 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 	sregs->efer = vcpu->arch.efer;
 	sregs->apic_base = kvm_get_apic_base(vcpu);

-	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
+	memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));

 	if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
 		set_bit(vcpu->arch.interrupt.nr,

@@ -8509,7 +8509,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	fpu->last_opcode = fxsave->fop;
 	fpu->last_ip = fxsave->rip;
 	fpu->last_dp = fxsave->rdp;
-	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
+	memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));

 	vcpu_put(vcpu);
 	return 0;

@@ -8530,7 +8530,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 	fxsave->fop = fpu->last_opcode;
 	fxsave->rip = fpu->last_ip;
 	fxsave->rdp = fpu->last_dp;
-	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
+	memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));

 	vcpu_put(vcpu);
 	return 0;
@@ -92,7 +92,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
 	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
 	 * in the full address space.
 	 */
-	info.high_limit = in_compat_syscall() ?
+	info.high_limit = in_32bit_syscall() ?
 		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);

@@ -116,7 +116,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
 	 * in the full address space.
 	 */
-	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
+	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
 		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

 	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
@@ -166,7 +166,7 @@ unsigned long get_mmap_base(int is_legacy)
 	struct mm_struct *mm = current->mm;

 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
-	if (in_compat_syscall()) {
+	if (in_32bit_syscall()) {
 		return is_legacy ? mm->mmap_compat_legacy_base
 				 : mm->mmap_compat_base;
 	}
@@ -399,9 +399,17 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 		n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
 		ret = -1;
 		for_each_node_mask(i, physnode_mask) {
+			/*
+			 * The reason we pass in blk[0] is due to
+			 * numa_remove_memblk_from() called by
+			 * emu_setup_memblk() will delete entry 0
+			 * and then move everything else up in the pi.blk
+			 * array. Therefore we should always be looking
+			 * at blk[0].
+			 */
 			ret = split_nodes_size_interleave_uniform(&ei, &pi,
-					pi.blk[i].start, pi.blk[i].end, 0,
-					n, &pi.blk[i], nid);
+					pi.blk[0].start, pi.blk[0].end, 0,
+					n, &pi.blk[0], nid);
 			if (ret < 0)
 				break;
 			if (ret < n) {
@@ -2309,9 +2309,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)

 	/*
 	 * We should perform an IPI and flush all tlbs,
-	 * but that can deadlock->flush only current cpu:
+	 * but that can deadlock->flush only current cpu.
+	 * Preemption needs to be disabled around __flush_tlb_all() due to
+	 * CR3 reload in __native_flush_tlb().
 	 */
+	preempt_disable();
 	__flush_tlb_all();
+	preempt_enable();

 	arch_flush_lazy_mmu_mode();
 }
@@ -130,7 +130,7 @@ static void regex_init(int use_real_mode)
 				 REG_EXTENDED|REG_NOSUB);

 		if (err) {
-			regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
+			regerror(err, &sym_regex_c[i], errbuf, sizeof(errbuf));
 			die("%s", errbuf);
 		}
 	}

@@ -405,7 +405,7 @@ static void read_shdrs(FILE *fp)
 	}
 	for (i = 0; i < ehdr.e_shnum; i++) {
 		struct section *sec = &secs[i];
-		if (fread(&shdr, sizeof shdr, 1, fp) != 1)
+		if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
 			die("Cannot read ELF section headers %d/%d: %s\n",
 			    i, ehdr.e_shnum, strerror(errno));
 		sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
@@ -194,7 +194,7 @@ extern unsigned long um_vdso_addr;

 typedef unsigned long elf_greg_t;

-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];

 typedef struct user_i387_struct elf_fpregset_t;
@@ -229,14 +229,6 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
 	return 0;
 }

-static inline bool is_compat(void)
-{
-	if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall())
-		return true;
-
-	return false;
-}
-
 static void
 copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src)
 {

@@ -263,7 +255,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
 	u8 *data;
 	int err;

-	if (is_compat()) {
+	if (in_compat_syscall()) {
 		struct compat_efi_variable *compat;

 		if (count != sizeof(*compat))

@@ -324,7 +316,7 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
 			     &entry->var.DataSize, entry->var.Data))
 		return -EIO;

-	if (is_compat()) {
+	if (in_compat_syscall()) {
 		compat = (struct compat_efi_variable *)buf;

 		size = sizeof(*compat);

@@ -418,7 +410,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
 	struct compat_efi_variable *compat = (struct compat_efi_variable *)buf;
 	struct efi_variable *new_var = (struct efi_variable *)buf;
 	struct efivar_entry *new_entry;
-	bool need_compat = is_compat();
+	bool need_compat = in_compat_syscall();
 	efi_char16_t *name;
 	unsigned long size;
 	u32 attributes;

@@ -495,7 +487,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;

-	if (is_compat()) {
+	if (in_compat_syscall()) {
 		if (count != sizeof(*compat))
 			return -EINVAL;

@@ -1032,9 +1032,9 @@ int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,

 #else /* !CONFIG_COMPAT */

 #define is_compat_task() (0)
-#ifndef in_compat_syscall
+/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
+#define in_compat_syscall in_compat_syscall
 static inline bool in_compat_syscall(void) { return false; }
-#endif

 #endif /* CONFIG_COMPAT */

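A hypothetical caller, sketching what this guard buys: because in_compat_syscall() is now always defined (the generic stub above returns false under !CONFIG_COMPAT), generic code can call it without conditional compilation, which is exactly the cleanup applied in the xfrm hunks below:

	static int example_rcv_msg(void)	/* hypothetical caller, not from this commit */
	{
		if (in_compat_syscall())	/* folds to 'if (0)' under !CONFIG_COMPAT */
			return -EOPNOTSUPP;
		/* ... native message handling ... */
		return 0;
	}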
@@ -842,7 +842,7 @@ int get_timespec64(struct timespec64 *ts,
 	ts->tv_sec = kts.tv_sec;

 	/* Zero out the padding for 32 bit systems or in compat mode */
-	if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()))
+	if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall())
 		kts.tv_nsec &= 0xFFFFFFFFUL;

 	ts->tv_nsec = kts.tv_nsec;
@@ -2077,10 +2077,8 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
 	struct xfrm_mgr *km;
 	struct xfrm_policy *pol = NULL;

-#ifdef CONFIG_COMPAT
 	if (in_compat_syscall())
 		return -EOPNOTSUPP;
-#endif

 	if (!optval && !optlen) {
 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
@@ -2621,10 +2621,8 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	const struct xfrm_link *link;
 	int type, err;

-#ifdef CONFIG_COMPAT
 	if (in_compat_syscall())
 		return -EOPNOTSUPP;
-#endif

 	type = nlh->nlmsg_type;
 	if (type > XFRM_MSG_MAX)
|
|||
struct symbol *pfunc = insn->func->pfunc;
|
||||
unsigned int prev_offset = 0;
|
||||
|
||||
list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
|
||||
list_for_each_entry_from(rela, &table->rela_sec->rela_list, list) {
|
||||
if (rela == next_table)
|
||||
break;
|
||||
|
||||
|
@ -926,6 +926,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
|
|||
{
|
||||
struct rela *text_rela, *rodata_rela;
|
||||
struct instruction *orig_insn = insn;
|
||||
struct section *rodata_sec;
|
||||
unsigned long table_offset;
|
||||
|
||||
/*
|
||||
|
@ -953,10 +954,13 @@ static struct rela *find_switch_table(struct objtool_file *file,
|
|||
/* look for a relocation which references .rodata */
|
||||
text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
|
||||
insn->len);
|
||||
if (!text_rela || text_rela->sym != file->rodata->sym)
|
||||
if (!text_rela || text_rela->sym->type != STT_SECTION ||
|
||||
!text_rela->sym->sec->rodata)
|
||||
continue;
|
||||
|
||||
table_offset = text_rela->addend;
|
||||
rodata_sec = text_rela->sym->sec;
|
||||
|
||||
if (text_rela->type == R_X86_64_PC32)
|
||||
table_offset += 4;
|
||||
|
||||
|
@ -964,10 +968,10 @@ static struct rela *find_switch_table(struct objtool_file *file,
|
|||
* Make sure the .rodata address isn't associated with a
|
||||
* symbol. gcc jump tables are anonymous data.
|
||||
*/
|
||||
if (find_symbol_containing(file->rodata, table_offset))
|
||||
if (find_symbol_containing(rodata_sec, table_offset))
|
||||
continue;
|
||||
|
||||
rodata_rela = find_rela_by_dest(file->rodata, table_offset);
|
||||
rodata_rela = find_rela_by_dest(rodata_sec, table_offset);
|
||||
if (rodata_rela) {
|
||||
/*
|
||||
* Use of RIP-relative switch jumps is quite rare, and
|
||||
|
@ -1052,7 +1056,7 @@ static int add_switch_table_alts(struct objtool_file *file)
|
|||
struct symbol *func;
|
||||
int ret;
|
||||
|
||||
if (!file->rodata || !file->rodata->rela)
|
||||
if (!file->rodata)
|
||||
return 0;
|
||||
|
||||
for_each_sec(file, sec) {
|
||||
|
@ -1198,10 +1202,33 @@ static int read_retpoline_hints(struct objtool_file *file)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void mark_rodata(struct objtool_file *file)
|
||||
{
|
||||
struct section *sec;
|
||||
bool found = false;
|
||||
|
||||
/*
|
||||
* This searches for the .rodata section or multiple .rodata.func_name
|
||||
* sections if -fdata-sections is being used. The .str.1.1 and .str.1.8
|
||||
* rodata sections are ignored as they don't contain jump tables.
|
||||
*/
|
||||
for_each_sec(file, sec) {
|
||||
if (!strncmp(sec->name, ".rodata", 7) &&
|
||||
!strstr(sec->name, ".str1.")) {
|
||||
sec->rodata = true;
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
|
||||
file->rodata = found;
|
||||
}
|
||||
|
||||
static int decode_sections(struct objtool_file *file)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mark_rodata(file);
|
||||
|
||||
ret = decode_instructions(file);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -2171,7 +2198,6 @@ int check(const char *_objname, bool orc)
|
|||
INIT_LIST_HEAD(&file.insn_list);
|
||||
hash_init(file.insn_hash);
|
||||
file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
|
||||
file.rodata = find_section_by_name(file.elf, ".rodata");
|
||||
file.c_file = find_section_by_name(file.elf, ".comment");
|
||||
file.ignore_unreachables = no_unreachable;
|
||||
file.hints = false;
|
||||
|
|
|
@@ -60,8 +60,8 @@ struct objtool_file {
 	struct elf *elf;
 	struct list_head insn_list;
 	DECLARE_HASHTABLE(insn_hash, 16);
-	struct section *rodata, *whitelist;
-	bool ignore_unreachables, c_file, hints;
+	struct section *whitelist;
+	bool ignore_unreachables, c_file, hints, rodata;
 };

 int check(const char *objname, bool orc);
@@ -301,7 +301,7 @@ static int read_symbols(struct elf *elf)
 		if (sym->type != STT_FUNC)
 			continue;
 		sym->pfunc = sym->cfunc = sym;
-		coldstr = strstr(sym->name, ".cold.");
+		coldstr = strstr(sym->name, ".cold");
 		if (!coldstr)
 			continue;

@@ -379,6 +379,7 @@ static int read_relas(struct elf *elf)
 			rela->offset = rela->rela.r_offset;
 			symndx = GELF_R_SYM(rela->rela.r_info);
 			rela->sym = find_symbol_by_index(elf, symndx);
+			rela->rela_sec = sec;
 			if (!rela->sym) {
 				WARN("can't find rela entry symbol %d for %s",
 				     symndx, sec->name);
@@ -48,7 +48,7 @@ struct section {
 	char *name;
 	int idx;
 	unsigned int len;
-	bool changed, text;
+	bool changed, text, rodata;
 };

 struct symbol {
@@ -68,6 +68,7 @@ struct rela {
 	struct list_head list;
 	struct hlist_node hash;
 	GElf_Rela rela;
+	struct section *rela_sec;
 	struct symbol *sym;
 	unsigned int type;
 	unsigned long offset;