Merge git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Marcelo Tosatti:
 - Fix for guest-triggerable BUG_ON (CVE-2014-0155)
 - CR4.SMAP support
 - Spurious WARN_ON() fix

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: remove WARN_ON from get_kernel_ns()
  KVM: Rename variable smep to cr4_smep
  KVM: expose SMAP feature to guest
  KVM: Disable SMAP for guests in EPT realmode and EPT unpaging mode
  KVM: Add SMAP support when setting CR4
  KVM: Remove SMAP bit from CR4_RESERVED_BITS
  KVM: ioapic: try to recover if pending_eoi goes out of range
  KVM: ioapic: fix assignment of ioapic->rtc_status.pending_eoi (CVE-2014-0155)
Merged by Linus Torvalds, 2014-04-14 16:21:28 -07:00 (commit 55101e2d6c)
9 changed files with 113 additions and 29 deletions

arch/x86/include/asm/kvm_host.h

@@ -60,7 +60,7 @@
 			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
 			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
 			  | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))

 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

arch/x86/kvm/cpuid.c

@@ -308,7 +308,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	const u32 kvm_supported_word9_x86_features =
 		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
 		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
-		F(ADX);
+		F(ADX) | F(SMAP);

 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();

arch/x86/kvm/cpuid.h

@@ -48,6 +48,14 @@ static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_SMEP));
 }

+static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_SMAP));
+}
+
 static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;

arch/x86/kvm/mmu.c

@@ -3601,20 +3601,27 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 	}
 }

-static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+void update_permission_bitmask(struct kvm_vcpu *vcpu,
 		struct kvm_mmu *mmu, bool ept)
 {
 	unsigned bit, byte, pfec;
 	u8 map;
-	bool fault, x, w, u, wf, uf, ff, smep;
+	bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0;

-	smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
 		pfec = byte << 1;
 		map = 0;
 		wf = pfec & PFERR_WRITE_MASK;
 		uf = pfec & PFERR_USER_MASK;
 		ff = pfec & PFERR_FETCH_MASK;
+		/*
+		 * PFERR_RSVD_MASK bit is set in PFEC if the access is not
+		 * subject to SMAP restrictions, and cleared otherwise. The
+		 * bit is only meaningful if the SMAP bit is set in CR4.
+		 */
+		smapf = !(pfec & PFERR_RSVD_MASK);
 		for (bit = 0; bit < 8; ++bit) {
 			x = bit & ACC_EXEC_MASK;
 			w = bit & ACC_WRITE_MASK;
@@ -3626,12 +3633,33 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
 				/* Allow supervisor writes if !cr0.wp */
 				w |= !is_write_protection(vcpu) && !uf;
 				/* Disallow supervisor fetches of user code if cr4.smep */
-				x &= !(smep && u && !uf);
+				x &= !(cr4_smep && u && !uf);
+
+				/*
+				 * SMAP:kernel-mode data accesses from user-mode
+				 * mappings should fault. A fault is considered
+				 * as a SMAP violation if all of the following
+				 * conditions are ture:
+				 *   - X86_CR4_SMAP is set in CR4
+				 *   - An user page is accessed
+				 *   - Page fault in kernel mode
+				 *   - if CPL = 3 or X86_EFLAGS_AC is clear
+				 *
+				 *   Here, we cover the first three conditions.
+				 *   The fourth is computed dynamically in
+				 *   permission_fault() and is in smapf.
+				 *
+				 *   Also, SMAP does not affect instruction
+				 *   fetches, add the !ff check here to make it
+				 *   clearer.
+				 */
+				smap = cr4_smap && u && !uf && !ff;
 			} else
 				/* Not really needed: no U/S accesses on ept */
 				u = 1;

-			fault = (ff && !x) || (uf && !u) || (wf && !w);
+			fault = (ff && !x) || (uf && !u) || (wf && !w) ||
+				(smapf && smap);
 			map |= fault << bit;
 		}
 		mmu->permissions[byte] = map;
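The table construction above is easy to check in isolation. The following user-space sketch is hypothetical (permissions_byte() is not a kernel function) and assumes a non-EPT MMU with NX enabled, CR0.WP = 1 and CR4.SMEP = 0; it derives one mmu->permissions[] byte the same way the loop above does.

#include <stdio.h>

#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK  (1U << 2)
#define PFERR_RSVD_MASK  (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)

#define ACC_EXEC_MASK  1U
#define ACC_WRITE_MASK 2U
#define ACC_USER_MASK  4U

/* One iteration of the outer loop above: build the fault bitmap for a single
 * pfec index, assuming !ept, NX enabled, CR0.WP = 1 and CR4.SMEP = 0. */
static unsigned char permissions_byte(unsigned pfec, int cr4_smap)
{
	unsigned char map = 0;
	int wf = !!(pfec & PFERR_WRITE_MASK);
	int uf = !!(pfec & PFERR_USER_MASK);
	int ff = !!(pfec & PFERR_FETCH_MASK);
	int smapf = !(pfec & PFERR_RSVD_MASK); /* RSVD reused: set == SMAP overridden */

	for (unsigned bit = 0; bit < 8; ++bit) {
		int x = !!(bit & ACC_EXEC_MASK);
		int w = !!(bit & ACC_WRITE_MASK);
		int u = !!(bit & ACC_USER_MASK);
		/* kernel-mode access to a user mapping, not an instruction fetch */
		int smap = cr4_smap && u && !uf && !ff;
		int fault = (ff && !x) || (uf && !u) || (wf && !w) ||
			    (smapf && smap);
		map |= fault << bit;
	}
	return map;
}

int main(void)
{
	/* supervisor data read (pfec index 0): prints 0 without SMAP, 0xf0 with it */
	printf("%#x %#x\n", permissions_byte(0, 0), permissions_byte(0, 1));
	/* same access with the reused RSVD bit set in the index: SMAP not enforced */
	printf("%#x\n", permissions_byte(PFERR_RSVD_MASK, 1));
	return 0;
}

For a plain supervisor data read, the byte goes from 0 to 0xf0 once CR4.SMAP is set, i.e. every pte marked user-accessible now faults, while the same access looked up through the reused PFERR_RSVD bit leaves SMAP unenforced.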

arch/x86/kvm/mmu.h

@@ -44,11 +44,17 @@
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1

-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)

 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
@@ -73,6 +79,8 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 		bool execonly);
+void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+		bool ept);

 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -110,10 +118,30 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
  * Will a fault with a given page-fault error code (pfec) cause a permission
  * fault with the given access (in ACC_* format)?
  */
-static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
-				    unsigned pfec)
+static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				    unsigned pte_access, unsigned pfec)
 {
-	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
+	int cpl = kvm_x86_ops->get_cpl(vcpu);
+	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+
+	/*
+	 * If CPL < 3, SMAP prevention are disabled if EFLAGS.AC = 1.
+	 *
+	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
+	 * (these are implicit supervisor accesses) regardless of the value
+	 * of EFLAGS.AC.
+	 *
+	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
+	 * the result in X86_EFLAGS_AC. We then insert it in place of
+	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
+	 * but it will be one in index if SMAP checks are being overridden.
+	 * It is important to keep this branchless.
+	 */
+	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
+	int index = (pfec >> 1) +
+		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+
+	return (mmu->permissions[index] >> pte_access) & 1;
 }

 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
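The branchless trick in permission_fault() can be sanity-checked outside the kernel. The stand-alone sketch below is illustrative only (smap_index() is not a kernel function); it reuses the same bit positions, EFLAGS.AC at bit 18 and PFERR_RSVD_BIT = 3, so the AC bit shifted right by 16 lands exactly on the reused RSVD position of pfec >> 1.

#include <stdio.h>

#define X86_EFLAGS_AC_BIT 18
#define X86_EFLAGS_AC     (1UL << X86_EFLAGS_AC_BIT)
#define PFERR_RSVD_BIT    3

/* Same arithmetic as permission_fault() above, as plain user-space code. */
static int smap_index(int cpl, unsigned long rflags, unsigned pfec)
{
	/* non-zero (== X86_EFLAGS_AC) only when cpl < 3 and EFLAGS.AC is set */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

	/* shift the AC bit down so it lands where PFERR_RSVD sits in pfec >> 1 */
	return (pfec >> 1) +
	       (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
}

int main(void)
{
	unsigned pfec = 1U << 1;				/* supervisor write fault */

	printf("%d\n", smap_index(0, 0, pfec));			/* 1: SMAP enforced    */
	printf("%d\n", smap_index(0, X86_EFLAGS_AC, pfec));	/* 5: AC overrides it  */
	printf("%d\n", smap_index(3, X86_EFLAGS_AC, pfec));	/* 1: CPL 3 ignores AC */
	return 0;
}

With pfec = 0x2 (a supervisor write), the index is 1 when SMAP is enforced and 5 when CPL < 3 and EFLAGS.AC overrides it; a CPL-3 access ignores AC, matching the CPL = 3 rule spelled out in the comment.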

arch/x86/kvm/paging_tmpl.h

@@ -353,7 +353,7 @@ retry_walk:
 		walker->ptes[walker->level - 1] = pte;
 	} while (!is_last_gpte(mmu, walker->level, pte));

-	if (unlikely(permission_fault(mmu, pte_access, access))) {
+	if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
 		errcode |= PFERR_PRESENT_MASK;
 		goto error;
 	}

arch/x86/kvm/vmx.c

@@ -3484,13 +3484,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			hw_cr4 &= ~X86_CR4_PAE;
 			hw_cr4 |= X86_CR4_PSE;
 			/*
-			 * SMEP is disabled if CPU is in non-paging mode in
-			 * hardware. However KVM always uses paging mode to
+			 * SMEP/SMAP is disabled if CPU is in non-paging mode
+			 * in hardware. However KVM always uses paging mode to
 			 * emulate guest non-paging mode with TDP.
-			 * To emulate this behavior, SMEP needs to be manually
-			 * disabled when guest switches to non-paging mode.
+			 * To emulate this behavior, SMEP/SMAP needs to be
+			 * manually disabled when guest switches to non-paging
+			 * mode.
 			 */
-			hw_cr4 &= ~X86_CR4_SMEP;
+			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
 		} else if (!(cr4 & X86_CR4_PAE)) {
 			hw_cr4 &= ~X86_CR4_PAE;
 		}

arch/x86/kvm/x86.c

@@ -652,6 +652,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
 		return 1;

+	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+		return 1;
+
 	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
 		return 1;
@@ -680,6 +683,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);

+	if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
+		update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
+
 	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
 		kvm_update_cpuid(vcpu);
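For context, the guest half of these two kvm_set_cr4() changes is an ordinary CPUID check followed by a CR4 write. The sketch below is hypothetical guest-kernel-style code (enable_smap_if_supported() is not a real kernel helper and must run at CPL 0); it checks CPUID.(EAX=7,ECX=0):EBX bit 20, which the new F(SMAP) entry in cpuid.c exposes, and then sets CR4 bit 21, which guest_cpuid_has_smap() now lets through.

#define X86_CR4_SMAP	(1UL << 21)	/* CR4.SMAP */

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;

	asm volatile("mov %%cr4, %0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long cr4)
{
	asm volatile("mov %0, %%cr4" : : "r" (cr4) : "memory");
}

/* Illustrative only: probe CPUID.(EAX=7,ECX=0):EBX bit 20, then set CR4 bit 21. */
static void enable_smap_if_supported(void)
{
	unsigned int eax = 7, ebx, ecx = 0, edx;

	asm volatile("cpuid"
		     : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx));
	if (ebx & (1U << 20))
		write_cr4(read_cr4() | X86_CR4_SMAP);
}

If the guest attempts the CR4 write without the CPUID bit being exposed, the new check above makes kvm_set_cr4() return 1 and the write is reflected back to the guest as a #GP.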
@@ -1117,7 +1123,6 @@ static inline u64 get_kernel_ns(void)
 {
 	struct timespec ts;

-	WARN_ON(preemptible());
 	ktime_get_ts(&ts);
 	monotonic_to_bootbased(&ts);
 	return timespec_to_ns(&ts);
@@ -4164,7 +4169,8 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 		| (write ? PFERR_WRITE_MASK : 0);

 	if (vcpu_match_mmio_gva(vcpu, gva)
-	    && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
+	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
+				 vcpu->arch.access, access)) {
 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
 			(gva & (PAGE_SIZE - 1));
 		trace_vcpu_match_mmio(gva, *gpa, write, false);

virt/kvm/ioapic.c

@@ -97,6 +97,14 @@ static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 	bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
 }

+static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
+
+static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
+{
+	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
+		kvm_rtc_eoi_tracking_restore_all(ioapic);
+}
+
 static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 {
 	bool new_val, old_val;
@@ -120,9 +128,8 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
 	} else {
 		__clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
 		ioapic->rtc_status.pending_eoi--;
+		rtc_status_pending_eoi_check_valid(ioapic);
 	}
-
-	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
 }

 void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
@@ -149,10 +156,10 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
 static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
 {
-	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
+	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
 		--ioapic->rtc_status.pending_eoi;
-
-	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+		rtc_status_pending_eoi_check_valid(ioapic);
+	}
 }
static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic) static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
@@ -353,10 +360,16 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
 	ioapic->irr &= ~(1 << irq);

 	if (irq == RTC_GSI && line_status) {
+		/*
+		 * pending_eoi cannot ever become negative (see
+		 * rtc_status_pending_eoi_check_valid) and the caller
+		 * ensures that it is only called if it is >= zero, namely
+		 * if rtc_irq_check_coalesced returns false).
+		 */
 		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
 				ioapic->rtc_status.dest_map);
-		ioapic->rtc_status.pending_eoi = ret;
+		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
 	} else
 		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
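To see why the clamp closes CVE-2014-0155: kvm_irq_delivery_to_apic() returns -1 when no vCPU accepts the interrupt, and the old code stored that value directly in pending_eoi. The model below is a deliberately simplified, hypothetical user-space sketch of the bookkeeping, not KVM code.

#include <assert.h>
#include <stdio.h>

static int pending_eoi;

/* stand-in for kvm_irq_delivery_to_apic(): returns -1 if no vCPU accepted */
static int deliver(int accepted_vcpus)
{
	return accepted_vcpus > 0 ? accepted_vcpus : -1;
}

/* stand-in for the RTC path of ioapic_service(); 'clamp' selects old/new code */
static void inject_rtc(int accepted_vcpus, int clamp)
{
	if (pending_eoi > 0)			/* rtc_irq_check_coalesced() */
		return;				/* still waiting for EOIs */
	assert(pending_eoi == 0);		/* the guest-triggerable BUG_ON() */
	int ret = deliver(accepted_vcpus);
	pending_eoi = clamp ? (ret < 0 ? 0 : ret) : ret;
}

int main(void)
{
	inject_rtc(0, 1);	/* undeliverable IRQ: clamped, pending_eoi stays 0   */
	inject_rtc(2, 1);	/* next injection passes the assert, pending_eoi = 2 */
	printf("pending_eoi = %d\n", pending_eoi);
	/* with clamp = 0, the first call would leave pending_eoi == -1 and the
	 * second call would trip the assert, i.e. the pre-fix BUG_ON()        */
	return 0;
}

Without the clamp, one undeliverable RTC interrupt leaves pending_eoi at -1; the coalescing check (pending_eoi > 0) does not catch a negative value, so the next injection reaches the BUG_ON(). With the clamp the counter stays at zero, and the WARN_ON()-plus-restore helper added above recovers if it ever does drift below zero.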