Merge branches 'x86-cleanups-for-linus', 'x86-vmware-for-linus', 'x86-mtrr-for-linus', 'x86-apic-for-linus', 'x86-fpu-for-linus' and 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Clean up arch/x86/kernel/cpu/mtrr/cleanup.c: use ";" not "," to terminate statements

* 'x86-vmware-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, vmware: Preset lpj values when on VMware.

* 'x86-mtrr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mtrr: Use stop machine context to rendezvous all the cpu's

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86/apic/es7000_32: Remove unused variable

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Avoid unnecessary __clear_user() and xrstor in signal handling

* 'x86-vdso-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, vdso: Unmap vdso pages
commit 75cb5fdce2
@@ -127,6 +127,15 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
         int err;
 
+        /*
+         * Clear the bytes not touched by the fxsave and reserved
+         * for the SW usage.
+         */
+        err = __clear_user(&fx->sw_reserved,
+                           sizeof(struct _fpx_sw_bytes));
+        if (unlikely(err))
+                return -EFAULT;
+
         asm volatile("1: rex64/fxsave (%[fx])\n\t"
                      "2:\n"
                      ".section .fixup,\"ax\"\n"

@@ -65,6 +65,16 @@ static inline int fpu_xrstor_checking(struct fpu *fpu)
 static inline int xsave_user(struct xsave_struct __user *buf)
 {
         int err;
+
+        /*
+         * Clear the xsave header first, so that reserved fields are
+         * initialized to zero.
+         */
+        err = __clear_user(&buf->xsave_hdr,
+                           sizeof(struct xsave_hdr_struct));
+        if (unlikely(err))
+                return -EFAULT;
+
         __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
                              "2:\n"
                              ".section .fixup,\"ax\"\n"

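Both hunks above clear only the bytes that the fxsave/xsave instruction is not guaranteed to fill in (the software-reserved area and the xsave header) instead of relying on a full-buffer __clear_user(), which a later hunk in this merge removes from save_i387_xstate(). A minimal userspace model of that idea; the struct layout and sizes here are illustrative only, not the kernel's real fxsave/xsave layout:

    /*
     * Userspace model of "clear only what the save instruction won't fill".
     * All names and sizes below are made up for illustration.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct fake_xsave_hdr {
            uint64_t xstate_bv;             /* reserved fields must read as zero */
            uint64_t reserved[7];
    };

    struct fake_xsave_area {
            uint8_t fxsave[512];            /* filled in by the save instruction */
            struct fake_xsave_hdr hdr;      /* not fully written by the hardware */
            uint8_t extended[256];
    };

    int main(void)
    {
            struct fake_xsave_area buf;

            /* Old approach: clear the whole (large) user buffer up front. */
            /* memset(&buf, 0, sizeof(buf)); */

            /* New approach: clear only the small header, as in the hunks above. */
            memset(&buf.hdr, 0, sizeof(buf.hdr));

            printf("cleared %zu of %zu bytes\n", sizeof(buf.hdr), sizeof(buf));
            return 0;
    }
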
@@ -129,7 +129,6 @@ int es7000_plat;
  * GSI override for ES7000 platforms.
  */
 
-static unsigned int base;
 
 static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
 {

@@ -632,9 +632,9 @@ static void __init mtrr_print_out_one_result(int i)
         unsigned long gran_base, chunk_base, lose_base;
         char gran_factor, chunk_factor, lose_factor;
 
-        gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
-        chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
-        lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
+        gran_base = to_size_factor(result[i].gran_sizek, &gran_factor);
+        chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor);
+        lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor);
 
         pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
                 result[i].bad ? "*BAD*" : " ",

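The hunk above only swaps trailing commas for semicolons: the comma operator made the three assignments one legal expression statement, so the old code compiled and behaved the same but was easy to misread and fragile to edit. A small standalone illustration:

    /* Why the "," vs ";" cleanup matters: both forms behave identically here,
     * but the comma operator chains the assignments into a single statement. */
    #include <stdio.h>

    int main(void)
    {
            int a, b, c;

            a = 1,          /* comma operator: all one expression statement */
            b = 2,
            c = 3;

            printf("%d %d %d\n", a, b, c);  /* prints: 1 2 3 */

            a = 4;          /* preferred: three independent statements */
            b = 5;
            c = 6;

            printf("%d %d %d\n", a, b, c);  /* prints: 4 5 6 */
            return 0;
    }
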
@@ -35,6 +35,7 @@
 
 #include <linux/types.h> /* FIXME: kvm_para.h needs this */
 
+#include <linux/stop_machine.h>
 #include <linux/kvm_para.h>
 #include <linux/uaccess.h>
 #include <linux/module.h>
@@ -143,22 +144,28 @@ struct set_mtrr_data {
         mtrr_type smp_type;
 };
 
+static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work);
+
 /**
- * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
+ * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs.
  * @info: pointer to mtrr configuration data
  *
  * Returns nothing.
  */
-static void ipi_handler(void *info)
+static int mtrr_work_handler(void *info)
 {
 #ifdef CONFIG_SMP
         struct set_mtrr_data *data = info;
         unsigned long flags;
 
+        atomic_dec(&data->count);
+        while (!atomic_read(&data->gate))
+                cpu_relax();
+
         local_irq_save(flags);
 
         atomic_dec(&data->count);
-        while (!atomic_read(&data->gate))
+        while (atomic_read(&data->gate))
                 cpu_relax();
 
         /* The master has cleared me to execute */
@@ -173,12 +180,13 @@ static void ipi_handler(void *info)
         }
 
         atomic_dec(&data->count);
-        while (atomic_read(&data->gate))
+        while (!atomic_read(&data->gate))
                 cpu_relax();
 
         atomic_dec(&data->count);
         local_irq_restore(flags);
 #endif
+        return 0;
 }
 
 static inline int types_compatible(mtrr_type type1, mtrr_type type2)
@@ -198,7 +206,7 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
  *
  * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
  *
- * 1. Send IPI to do the following:
+ * 1. Queue work to do the following on all processors:
  * 2. Disable Interrupts
  * 3. Wait for all procs to do so
  * 4. Enter no-fill cache mode
@@ -215,14 +223,17 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
  * 15. Enable interrupts.
  *
  * What does that mean for us? Well, first we set data.count to the number
- * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
- * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
- * Meanwhile, they are waiting for that flag to be set. Once it's set, each
+ * of CPUs. As each CPU announces that it started the rendezvous handler by
+ * decrementing the count, We reset data.count and set the data.gate flag
+ * allowing all the cpu's to proceed with the work. As each cpu disables
+ * interrupts, it'll decrement data.count once. We wait until it hits 0 and
+ * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they
+ * are waiting for that flag to be cleared. Once it's cleared, each
  * CPU goes through the transition of updating MTRRs.
  * The CPU vendors may each do it differently,
  * so we call mtrr_if->set() callback and let them take care of it.
  * When they're done, they again decrement data->count and wait for data.gate
- * to be reset.
+ * to be set.
  * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
  * Everyone then enables interrupts and we all continue on.
  *
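A minimal userspace model of the count/gate rendezvous described in this comment, using one master thread and a few worker threads with C11 atomics. The names, thread setup, and the printf() stand-ins for the interrupt/MTRR steps are illustrative only, not the kernel implementation:

    /* Sketch of the count/gate rendezvous pattern; not kernel code. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NWORKERS 3

    static atomic_int count;
    static atomic_int gate;

    /* Worker side: announce arrival, then spin until the gate reaches 'want'. */
    static void worker_step(int want)
    {
            atomic_fetch_sub(&count, 1);
            while (atomic_load(&gate) != want)
                    ;                       /* cpu_relax() in the kernel */
    }

    /* Master side: wait until every worker announced, reset the count, flip the gate. */
    static void master_step(int next)
    {
            while (atomic_load(&count) != 0)
                    ;
            atomic_store(&count, NWORKERS);
            atomic_store(&gate, next);
    }

    static void *worker(void *arg)
    {
            long id = (long)arg;

            worker_step(1);                 /* wait to be released        */
            printf("worker %ld: irqs off\n", id);
            worker_step(0);                 /* wait for master's go-ahead */
            printf("worker %ld: update MTRRs\n", id);
            worker_step(1);                 /* wait before re-enabling    */
            printf("worker %ld: irqs on\n", id);
            return NULL;
    }

    int main(void)
    {
            pthread_t tid[NWORKERS];

            atomic_store(&count, NWORKERS);
            atomic_store(&gate, 0);
            for (long i = 0; i < NWORKERS; i++)
                    pthread_create(&tid[i], NULL, worker, (void *)i);

            master_step(1);                 /* everyone arrived: release them    */
            master_step(0);                 /* everyone has irqs off: do MTRRs   */
            printf("master: update MTRRs\n");
            master_step(1);                 /* everyone updated: re-enable irqs  */

            for (int i = 0; i < NWORKERS; i++)
                    pthread_join(tid[i], NULL);
            return 0;
    }

The same pattern appears on the master side in the set_mtrr() hunks below: wait for data.count to hit zero, reset it, then flip data.gate.
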
@@ -234,6 +245,9 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
 {
         struct set_mtrr_data data;
         unsigned long flags;
+        int cpu;
+
+        preempt_disable();
 
         data.smp_reg = reg;
         data.smp_base = base;
@@ -246,8 +260,23 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
         atomic_set(&data.gate, 0);
 
         /* Start the ball rolling on other CPUs */
-        if (smp_call_function(ipi_handler, &data, 0) != 0)
-                panic("mtrr: timed out waiting for other CPUs\n");
+        for_each_online_cpu(cpu) {
+                struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu);
+
+                if (cpu == smp_processor_id())
+                        continue;
+
+                stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work);
+        }
+
 
+        while (atomic_read(&data.count))
+                cpu_relax();
+
+        /* Ok, reset count and toggle gate */
+        atomic_set(&data.count, num_booting_cpus() - 1);
+        smp_wmb();
+        atomic_set(&data.gate, 1);
+
         local_irq_save(flags);
 
@@ -257,7 +286,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
         /* Ok, reset count and toggle gate */
         atomic_set(&data.count, num_booting_cpus() - 1);
         smp_wmb();
-        atomic_set(&data.gate, 1);
+        atomic_set(&data.gate, 0);
 
         /* Do our MTRR business */
 
@@ -279,7 +308,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
 
         atomic_set(&data.count, num_booting_cpus() - 1);
         smp_wmb();
-        atomic_set(&data.gate, 0);
+        atomic_set(&data.gate, 1);
 
         /*
          * Wait here for everyone to have seen the gate change
@@ -289,6 +318,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ
                 cpu_relax();
 
         local_irq_restore(flags);
+        preempt_enable();
 }
 
 /**

@@ -51,7 +51,7 @@ static inline int __vmware_platform(void)
 
 static unsigned long vmware_get_tsc_khz(void)
 {
-        uint64_t tsc_hz;
+        uint64_t tsc_hz, lpj;
         uint32_t eax, ebx, ecx, edx;
 
         VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
@@ -62,6 +62,13 @@ static unsigned long vmware_get_tsc_khz(void)
         printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
                (unsigned long) tsc_hz / 1000,
                (unsigned long) tsc_hz % 1000);
+
+        if (!preset_lpj) {
+                lpj = ((u64)tsc_hz * 1000);
+                do_div(lpj, HZ);
+                preset_lpj = lpj;
+        }
+
         return tsc_hz;
 }
 

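The lpj preset added above is plain arithmetic: despite its name, tsc_hz holds the TSC frequency in kHz at this point (the printk divides it by 1000 to print MHz), so tsc_hz * 1000 / HZ gives TSC cycles per jiffy. A worked example with assumed values (2.4 GHz TSC, HZ=250 as an example config):

    /* Worked example of the lpj computation; the input values are assumptions. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t tsc_khz = 2400000;        /* 2.4 GHz TSC reported by the hypervisor */
            unsigned int hz = 250;             /* example CONFIG_HZ                      */
            uint64_t lpj = tsc_khz * 1000;     /* back to Hz: cycles per second          */

            lpj /= hz;                         /* cycles per timer tick (jiffy)          */
            printf("preset_lpj = %llu\n", (unsigned long long)lpj);   /* 9600000 */
            return 0;
    }
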
@@ -816,6 +816,13 @@ do_rest:
                 if (cpumask_test_cpu(cpu, cpu_callin_mask))
                         break;  /* It has booted */
                 udelay(100);
+                /*
+                 * Allow other tasks to run while we wait for the
+                 * AP to come online. This also gives a chance
+                 * for the MTRR work(triggered by the AP coming online)
+                 * to be completed in the stop machine context.
+                 */
+                schedule();
         }
 
         if (cpumask_test_cpu(cpu, cpu_callin_mask))

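The schedule() added above lets other runnable tasks, including the stop-machine MTRR work mentioned in the comment, make progress while the boot CPU polls for the AP. A rough userspace analogue, with sched_yield() standing in for schedule() and a thread standing in for the booting AP; all names here are illustrative:

    /* Poll a flag but yield the CPU each iteration instead of busy-waiting. */
    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int ap_online;

    static void *ap_thread(void *arg)
    {
            (void)arg;
            usleep(10000);                  /* pretend the AP takes a while to boot */
            atomic_store(&ap_online, 1);
            return NULL;
    }

    int main(void)
    {
            pthread_t tid;

            pthread_create(&tid, NULL, ap_thread, NULL);
            while (!atomic_load(&ap_online))
                    sched_yield();          /* let other runnable work execute */
            printf("AP is online\n");
            pthread_join(tid, NULL);
            return 0;
    }
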
@@ -92,14 +92,6 @@ int save_i387_xstate(void __user *buf)
                 return 0;
 
         if (task_thread_info(tsk)->status & TS_USEDFPU) {
-                /*
-                 * Start with clearing the user buffer. This will present a
-                 * clean context for the bytes not touched by the fxsave/xsave.
-                 */
-                err = __clear_user(buf, sig_xstate_size);
-                if (err)
-                        return err;
-
                 if (use_xsave())
                         err = xsave_user(buf);
                 else
@@ -185,8 +177,8 @@ static int restore_user_xstate(void __user *buf)
          * init the state skipped by the user.
          */
         mask = pcntxt_mask & ~mask;
-
-        xrstor_state(init_xstate_buf, mask);
+        if (unlikely(mask))
+                xrstor_state(init_xstate_buf, mask);
 
         return 0;
 

@@ -67,6 +67,7 @@ static int __init init_vdso_vars(void)
         *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
 #include "vextern.h"
 #undef VEXTERN
+        vunmap(vbase);
         return 0;
 
  oom: