Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 perf updates from Ingo Molnar:
 "This series tightens up RDPMC permissions: currently even highly
  sandboxed x86 execution environments (such as seccomp) have permission
  to execute RDPMC, which may leak various perf events / PMU state such
  as timing information and other CPU execution details.

  This 'all is allowed' RDPMC mode is still preserved as the
  (non-default) /sys/devices/cpu/rdpmc=2 setting. The new default is
  that RDPMC access is only allowed if a perf event is mmap-ed (which is
  needed to correctly interpret RDPMC counter values in any case).

  As a side effect of these changes CR4 handling is cleaned up in the
  x86 code and a shadow copy of the CR4 value is added.

  The extra CR4 manipulation adds ~ <50ns to the context switch cost
  between rdpmc-capable and rdpmc-non-capable mms"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks
  perf/x86: Only allow rdpmc if a perf_event is mapped
  perf: Pass the event to arch_perf_update_userpage()
  perf: Add pmu callbacks to track event mapping and unmapping
  x86: Add a comment clarifying LDT context switching
  x86: Store a per-cpu shadow copy of CR4
  x86: Clean up cr4 manipulation
commit 37507717de
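For context on the new default (rdpmc is only usable after a perf event is mmap-ed), the following is a minimal, illustrative userspace sketch; it is not part of this commit. It assumes a hardware cycles event is available, and a production reader should additionally use the pc->lock seqcount and pc->offset when interpreting the raw RDPMC value.

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static inline uint64_t rdpmc(uint32_t counter)
{
        uint32_t lo, hi;

        asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
        return lo | ((uint64_t)hi << 32);
}

int main(void)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_HARDWARE,
                .size           = sizeof(attr),
                .config         = PERF_COUNT_HW_CPU_CYCLES,
                .exclude_kernel = 1,
        };
        struct perf_event_mmap_page *pc;
        int fd;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;

        /* Mapping the event is what grants this mm RDPMC access. */
        pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
        if (pc == MAP_FAILED)
                return 1;

        /* index == 0 means the event is not currently on a hardware counter. */
        if (pc->cap_user_rdpmc && pc->index)
                printf("cycles so far: %llu\n",
                       (unsigned long long)rdpmc(pc->index - 1));

        return 0;
}

Once such a mapping exists, the task's mm gets X86_CR4_PCE set (see the x86_pmu_event_mapped()/load_mm_cr4() changes in the diff below); without the mapping, the default rdpmc=1 mode leaves RDPMC faulting in userspace.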
@@ -19,6 +19,8 @@ typedef struct {
        struct mutex lock;
        void __user *vdso;

        atomic_t perf_rdpmc_allowed;    /* nonzero if rdpmc is allowed */
} mm_context_t;

#ifdef CONFIG_SMP
@@ -18,6 +18,21 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
}
#endif /* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_true(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

/*
 * Used for LDT copy/destruction.
 */

@@ -52,15 +67,20 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                /* Stop flush ipis for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load per-mm CR4 state */
                load_mm_cr4(next);

                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible leave_mm(prev) has been called. If so,
                 * then prev->context.ldt could be out of sync with the
                 * LDT descriptor or the LDT register. This can only happen
                 * if prev->context.ldt is non-null, since we never free
                 * an LDT. But LDTs can't be shared across mms, so
                 * prev->context.ldt won't be equal to next->context.ldt.
                 * It's possible that prev->context.ldt doesn't match
                 * the LDT register. This can happen if leave_mm(prev)
                 * was called and then modify_ldt changed
                 * prev->context.ldt but suppressed an IPI to this CPU.
                 * In this case, prev->context.ldt != NULL, because we
                 * never free an LDT while the mm still exists. That
                 * means that next->context.ldt != prev->context.ldt,
                 * because mms never share an LDT.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_LDT_nolock(&next->context);

@@ -85,6 +105,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_mm_cr4(next);
                        load_LDT_nolock(&next->context);
                }
        }
@@ -80,16 +80,16 @@ static inline void write_cr3(unsigned long x)
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
static inline unsigned long __read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
static inline unsigned long __read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
static inline void __write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}
@@ -579,39 +579,6 @@ static inline void load_sp0(struct tss_struct *tss,
#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
        unsigned long cr4;

        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4 = read_cr4();
        cr4 |= mask;
        write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
        unsigned long cr4;

        mmu_cr4_features &= ~mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4 = read_cr4();
        cr4 &= ~mask;
        write_cr4(cr4);
}

typedef struct {
        unsigned long seg;
} mm_segment_t;
@@ -137,17 +137,17 @@ static inline void write_cr3(unsigned long x)
        native_write_cr3(x);
}

static inline unsigned long read_cr4(void)
static inline unsigned long __read_cr4(void)
{
        return native_read_cr4();
}

static inline unsigned long read_cr4_safe(void)
static inline unsigned long __read_cr4_safe(void)
{
        return native_read_cr4_safe();
}

static inline void write_cr4(unsigned long x)
static inline void __write_cr4(unsigned long x)
{
        native_write_cr4(x);
}
@@ -15,6 +15,75 @@
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
#ifdef CONFIG_SMP
        struct mm_struct *active_mm;
        int state;
#endif

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPU's that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

static inline void __native_flush_tlb(void)
{
        native_write_cr3(native_read_cr3());

@@ -24,7 +93,7 @@ static inline void __native_flush_tlb_global_irq_disabled(void)
{
        unsigned long cr4;

        cr4 = native_read_cr4();
        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* clear PGE */
        native_write_cr4(cr4 & ~X86_CR4_PGE);
        /* write old PGE again and flush TLBs */

@@ -184,12 +253,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2

struct tlb_state {
        struct mm_struct *active_mm;
        int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

static inline void reset_lazy_tlbstate(void)
{
        this_cpu_write(cpu_tlbstate.state, 0);
@@ -19,6 +19,7 @@
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/tlbflush.h>

/*
 * VMX functions:

@@ -40,12 +41,12 @@ static inline int cpu_has_vmx(void)
static inline void cpu_vmxoff(void)
{
        asm volatile (ASM_VMX_VMXOFF : : : "cc");
        write_cr4(read_cr4() & ~X86_CR4_VMXE);
        cr4_clear_bits(X86_CR4_VMXE);
}

static inline int cpu_vmx_enabled(void)
{
        return read_cr4() & X86_CR4_VMXE;
        return __read_cr4() & X86_CR4_VMXE;
}

/** Disable VMX if it is enabled on the current CPU
@@ -78,7 +78,7 @@ int x86_acpi_suspend_lowlevel(void)
        header->pmode_cr0 = read_cr0();
        if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
                header->pmode_cr4 = read_cr4();
                header->pmode_cr4 = __read_cr4();
                header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
        }
        if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
@@ -19,6 +19,7 @@
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>

@@ -278,7 +279,7 @@ __setup("nosmep", setup_disable_smep);
static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_SMEP))
                set_in_cr4(X86_CR4_SMEP);
                cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)

@@ -298,9 +299,9 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
                set_in_cr4(X86_CR4_SMAP);
                cr4_set_bits(X86_CR4_SMAP);
#else
                clear_in_cr4(X86_CR4_SMAP);
                cr4_clear_bits(X86_CR4_SMAP);
#endif
        }
}

@@ -1294,6 +1295,12 @@ void cpu_init(void)
        wait_for_master_cpu(cpu);

        /*
         * Initialize the CR4 shadow before doing anything that could
         * try to read it.
         */
        cr4_init_shadow();

        /*
         * Load microcode on this cpu if a valid microcode is available.
         * This is early microcode loading procedure.

@@ -1313,7 +1320,7 @@ void cpu_init(void)
        pr_debug("Initializing CPU#%d\n", cpu);

        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
        cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        /*
         * Initialize the per-CPU GDT with the boot GDT,

@@ -1394,7 +1401,7 @@ void cpu_init(void)
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
                cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        load_current_idt();
        switch_to_new_gdt(cpu);
@@ -44,6 +44,7 @@
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>

@@ -1452,7 +1453,7 @@ static void __mcheck_cpu_init_generic(void)
        bitmap_fill(all_banks, MAX_NR_BANKS);
        machine_check_poll(MCP_UC | m_fl, &all_banks);

        set_in_cr4(X86_CR4_MCE);
        cr4_set_bits(X86_CR4_MCE);

        rdmsrl(MSR_IA32_MCG_CAP, cap);
        if (cap & MCG_CTL_P)
@@ -9,6 +9,7 @@
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>

@@ -65,7 +66,7 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
               "Intel old style machine check architecture supported.\n");

        /* Enable MCE: */
        set_in_cr4(X86_CR4_MCE);
        cr4_set_bits(X86_CR4_MCE);
        printk(KERN_INFO
               "Intel old style machine check reporting enabled on CPU#%d.\n",
               smp_processor_id());
@@ -8,6 +8,7 @@
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/msr.h>

@@ -36,7 +37,7 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
        lo &= ~(1<<4); /* Enable MCE */
        wrmsr(MSR_IDT_FCR1, lo, hi);

        set_in_cr4(X86_CR4_MCE);
        cr4_set_bits(X86_CR4_MCE);

        printk(KERN_INFO
               "Winchip machine check reporting enabled on CPU#0.\n");
@@ -138,8 +138,8 @@ static void prepare_set(void)
        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if (cpu_has_pge) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
                cr4 = __read_cr4();
                __write_cr4(cr4 & ~X86_CR4_PGE);
        }

        /*

@@ -171,7 +171,7 @@ static void post_set(void)
        /* Restore value of CR4 */
        if (cpu_has_pge)
                write_cr4(cr4);
                __write_cr4(cr4);
}

static void cyrix_set_arr(unsigned int reg, unsigned long base,
@@ -678,8 +678,8 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if (cpu_has_pge) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
                cr4 = __read_cr4();
                __write_cr4(cr4 & ~X86_CR4_PGE);
        }

        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */

@@ -708,7 +708,7 @@ static void post_set(void) __releases(set_atomicity_lock)
        /* Restore value of CR4 */
        if (cpu_has_pge)
                write_cr4(cr4);
                __write_cr4(cr4);
        raw_spin_unlock(&set_atomicity_lock);
}
@@ -31,6 +31,8 @@
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>

@@ -43,6 +45,8 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
};

struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;

u64 __read_mostly hw_cache_event_ids
                        [PERF_COUNT_HW_CACHE_MAX]
                        [PERF_COUNT_HW_CACHE_OP_MAX]

@@ -1327,8 +1331,6 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
                break;

        case CPU_STARTING:
                if (x86_pmu.attr_rdpmc)
                        set_in_cr4(X86_CR4_PCE);
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;

@@ -1804,14 +1806,44 @@ static int x86_pmu_event_init(struct perf_event *event)
                        event->destroy(event);
        }

        if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
                event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;

        return err;
}

static void refresh_pce(void *ignored)
{
        if (current->mm)
                load_mm_cr4(current->mm);
}

static void x86_pmu_event_mapped(struct perf_event *event)
{
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return;

        if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
                on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
}

static void x86_pmu_event_unmapped(struct perf_event *event)
{
        if (!current->mm)
                return;

        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return;

        if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
                on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
}

static int x86_pmu_event_idx(struct perf_event *event)
{
        int idx = event->hw.idx;

        if (!x86_pmu.attr_rdpmc)
        if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
                return 0;

        if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {

@@ -1829,16 +1861,6 @@ static ssize_t get_attr_rdpmc(struct device *cdev,
        return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
}

static void change_rdpmc(void *info)
{
        bool enable = !!(unsigned long)info;

        if (enable)
                set_in_cr4(X86_CR4_PCE);
        else
                clear_in_cr4(X86_CR4_PCE);
}

static ssize_t set_attr_rdpmc(struct device *cdev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)

@@ -1850,14 +1872,27 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
        if (ret)
                return ret;

        if (val > 2)
                return -EINVAL;

        if (x86_pmu.attr_rdpmc_broken)
                return -ENOTSUPP;

        if (!!val != !!x86_pmu.attr_rdpmc) {
                x86_pmu.attr_rdpmc = !!val;
                on_each_cpu(change_rdpmc, (void *)val, 1);
        if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
                /*
                 * Changing into or out of always available, aka
                 * perf-event-bypassing mode. This path is extremely slow,
                 * but only root can trigger it, so it's okay.
                 */
                if (val == 2)
                        static_key_slow_inc(&rdpmc_always_available);
                else
                        static_key_slow_dec(&rdpmc_always_available);
                on_each_cpu(refresh_pce, NULL, 1);
        }

        x86_pmu.attr_rdpmc = val;

        return count;
}

@@ -1900,6 +1935,9 @@ static struct pmu pmu = {
        .event_init = x86_pmu_event_init,

        .event_mapped = x86_pmu_event_mapped,
        .event_unmapped = x86_pmu_event_unmapped,

        .add = x86_pmu_add,
        .del = x86_pmu_del,
        .start = x86_pmu_start,

@@ -1914,13 +1952,15 @@ static struct pmu pmu = {
        .flush_branch_stack = x86_pmu_flush_branch_stack,
};

void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
void arch_perf_update_userpage(struct perf_event *event,
                               struct perf_event_mmap_page *userpg, u64 now)
{
        struct cyc2ns_data *data;

        userpg->cap_user_time = 0;
        userpg->cap_user_time_zero = 0;
        userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
        userpg->cap_user_rdpmc =
                !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
        userpg->pmc_width = x86_pmu.cntval_bits;

        if (!sched_clock_stable())
|
|||
#define PERF_X86_EVENT_COMMITTED 0x8 /* event passed commit_txn */
|
||||
#define PERF_X86_EVENT_PEBS_LD_HSW 0x10 /* haswell style datala, load */
|
||||
#define PERF_X86_EVENT_PEBS_NA_HSW 0x20 /* haswell style datala, unknown */
|
||||
#define PERF_X86_EVENT_RDPMC_ALLOWED 0x40 /* grant rdpmc permission */
|
||||
|
||||
|
||||
struct amd_nb {
|
||||
int nb_id; /* NorthBridge id */
|
||||
|
|
|
@@ -31,6 +31,7 @@ static void __init i386_default_early_setup(void)
asmlinkage __visible void __init i386_start_kernel(void)
{
        cr4_init_shadow();
        sanitize_boot_params(&boot_params);

        /* Call the subarch specific early setup function */
@@ -156,6 +156,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
                (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        cr4_init_shadow();

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();
@@ -13,6 +13,7 @@
#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>

@@ -193,7 +194,7 @@ void fpu_init(void)
        if (cpu_has_xmm)
                cr4_mask |= X86_CR4_OSXMMEXCPT;
        if (cr4_mask)
                set_in_cr4(cr4_mask);
                cr4_set_bits(cr4_mask);

        cr0 = read_cr0();
        cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
@@ -28,6 +28,7 @@
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,

@@ -141,7 +142,7 @@ void flush_thread(void)
static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
        cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)

@@ -158,7 +159,7 @@ void disable_TSC(void)
static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
        cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
@@ -101,7 +101,7 @@ void __show_regs(struct pt_regs *regs, int all)
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        cr4 = __read_cr4_safe();
        printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);
@@ -93,7 +93,7 @@ void __show_regs(struct pt_regs *regs, int all)
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
@@ -1179,7 +1179,7 @@ void __init setup_arch(char **cmdline_p)
        if (boot_cpu_data.cpuid_level >= 0) {
                /* A CPU has %cr4 if and only if it has CPUID */
                mmu_cr4_features = read_cr4();
                mmu_cr4_features = __read_cr4();
                if (trampoline_cr4_features)
                        *trampoline_cr4_features = mmu_cr4_features;
        }
@@ -12,6 +12,7 @@
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/sigframe.h>
#include <asm/tlbflush.h>
#include <asm/xcr.h>

/*

@@ -453,7 +454,7 @@ static void prepare_fx_sw_frame(void)
 */
static inline void xstate_enable(void)
{
        set_in_cr4(X86_CR4_OSXSAVE);
        cr4_set_bits(X86_CR4_OSXSAVE);
        xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}
@@ -1583,7 +1583,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
        unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
        unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

        if (cr4 & X86_CR4_VMXE)
@@ -2871,7 +2871,7 @@ static int hardware_enable(void)
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
        u64 old, test_bits;

        if (read_cr4() & X86_CR4_VMXE)
        if (cr4_read_shadow() & X86_CR4_VMXE)
                return -EBUSY;

        INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));

@@ -2898,7 +2898,7 @@ static int hardware_enable(void)
                /* enable and lock */
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
        }
        write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
        cr4_set_bits(X86_CR4_VMXE);

        if (vmm_exclusive) {
                kvm_cpu_vmxon(phys_addr);

@@ -2935,7 +2935,7 @@ static void hardware_disable(void)
                vmclear_local_loaded_vmcss();
                kvm_cpu_vmxoff();
        }
        write_cr4(read_cr4() & ~X86_CR4_VMXE);
        cr4_clear_bits(X86_CR4_VMXE);
}

static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,

@@ -4450,7 +4450,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
        vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */

        /* Save the most likely value for this task's CR4 in the VMCS. */
        cr4 = read_cr4();
        cr4 = cr4_read_shadow();
        vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
        vmx->host_state.vmcs_host_cr4 = cr4;

@@ -8146,7 +8146,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);

        cr4 = read_cr4();
        cr4 = cr4_read_shadow();
        if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
                vmcs_writel(HOST_CR4, cr4);
                vmx->host_state.vmcs_host_cr4 = cr4;
@@ -600,7 +600,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
        if (pte && pte_present(*pte) && pte_exec(*pte) &&
                        (pgd_flags(*pgd) & _PAGE_USER) &&
                        (read_cr4() & X86_CR4_SMEP))
                        (__read_cr4() & X86_CR4_SMEP))
                printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
}
@@ -173,11 +173,11 @@ static void __init probe_page_size_mask(void)
        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);
                cr4_set_bits_and_update_boot(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                cr4_set_bits_and_update_boot(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }
}

@@ -713,6 +713,15 @@ void __init zone_sizes_init(void)
        free_area_init_nodes(max_zone_pfns);
}

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
#ifdef CONFIG_SMP
        .active_mm = &init_mm,
        .state = 0,
#endif
        .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
        /* entry 0 MUST be WB (hardwired to speed up translations) */
@@ -14,9 +14,6 @@
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
                        = { &init_mm, 0, };

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
@@ -105,11 +105,8 @@ static void __save_processor_state(struct saved_context *ctxt)
        ctxt->cr0 = read_cr0();
        ctxt->cr2 = read_cr2();
        ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
        ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
        ctxt->cr4 = read_cr4();
        ctxt->cr4 = __read_cr4_safe();
#ifdef CONFIG_X86_64
        ctxt->cr8 = read_cr8();
#endif
        ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,

@@ -175,12 +172,12 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        /* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
        if (ctxt->cr4)
                write_cr4(ctxt->cr4);
                __write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
        wrmsrl(MSR_EFER, ctxt->efer);
        write_cr8(ctxt->cr8);
        write_cr4(ctxt->cr4);
        __write_cr4(ctxt->cr4);
#endif
        write_cr3(ctxt->cr3);
        write_cr2(ctxt->cr2);
@@ -81,7 +81,7 @@ void __init setup_real_mode(void)
        trampoline_header->start = (u64) secondary_startup_64;
        trampoline_cr4_features = &trampoline_header->cr4;
        *trampoline_cr4_features = read_cr4();
        *trampoline_cr4_features = __read_cr4();

        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
        trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
@@ -1494,10 +1494,10 @@ static void xen_pvh_set_cr_flags(int cpu)
         * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
         */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);
                cr4_set_bits_and_update_boot(X86_CR4_PSE);

        if (cpu_has_pge)
                set_in_cr4(X86_CR4_PGE);
                cr4_set_bits_and_update_boot(X86_CR4_PGE);
}

/*
@@ -47,6 +47,7 @@
#include <asm/lguest.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/tlbflush.h>
#include "../lg.h"

static int cpu_had_pge;

@@ -452,9 +453,9 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
static void adjust_pge(void *on)
{
        if (on)
                write_cr4(read_cr4() | X86_CR4_PGE);
                cr4_set_bits(X86_CR4_PGE);
        else
                write_cr4(read_cr4() & ~X86_CR4_PGE);
                cr4_clear_bits(X86_CR4_PGE);
}

/*H:020
@@ -202,6 +202,13 @@ struct pmu {
         */
        int (*event_init) (struct perf_event *event);

        /*
         * Notification that the event was mapped or unmapped. Called
         * in the context of the mapping task.
         */
        void (*event_mapped) (struct perf_event *event); /*optional*/
        void (*event_unmapped) (struct perf_event *event); /*optional*/

#define PERF_EF_START 0x01 /* start the counter when adding    */
#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
@@ -4101,7 +4101,8 @@ unlock:
        rcu_read_unlock();
}

void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
void __weak arch_perf_update_userpage(
        struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
{
}

@@ -4151,7 +4152,7 @@ void perf_event_update_userpage(struct perf_event *event)
        userpg->time_running = running +
                        atomic64_read(&event->child_total_time_running);

        arch_perf_update_userpage(userpg, now);
        arch_perf_update_userpage(event, userpg, now);

        barrier();
        ++userpg->lock;

@@ -4293,6 +4294,9 @@ static void perf_mmap_open(struct vm_area_struct *vma)
        atomic_inc(&event->mmap_count);
        atomic_inc(&event->rb->mmap_count);

        if (event->pmu->event_mapped)
                event->pmu->event_mapped(event);
}

/*

@@ -4312,6 +4316,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);

        if (event->pmu->event_unmapped)
                event->pmu->event_unmapped(event);

        atomic_dec(&rb->mmap_count);

        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))

@@ -4513,6 +4520,9 @@ unlock:
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &perf_mmap_vmops;

        if (event->pmu->event_mapped)
                event->pmu->event_mapped(event);

        return ret;
}