Merge branch 'kvm-sev-move-context' into kvm-master
Add support for AMD SEV and SEV-ES intra-host migration. Intra-host migration provides a low-cost mechanism for userspace VMM upgrades. In the common case for intra-host migration, we can rely on the normal ioctls for passing data from one VMM to the next. SEV, SEV-ES, and other confidential compute environments make most of this information opaque, and render KVM ioctls such as "KVM_GET_REGS" irrelevant. As a result, we need the ability to pass this opaque metadata from one VMM to the next. The easiest way to do this is to leave this data in the kernel, and transfer ownership of the metadata from one KVM VM (or vCPU) to the next. In-kernel hand-off makes it possible to move any data that would be unsafe/impossible for the kernel to hand directly to userspace, and that cannot be reproduced using data that can be handed to userspace.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 1f05833193
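For reference, a minimal sketch of how a destination VMM could invoke the new capability from userspace; the helper name and error handling here are illustrative only (the selftest added by this series does the same thing through KVM_ENABLE_CAP):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/*
	 * Illustrative helper: ask KVM to move the SEV/SEV-ES encryption
	 * context (and, for SEV-ES, the per-vCPU GHCB/VMSA state) from the
	 * VM behind src_vm_fd into the VM behind dst_vm_fd.  Returns 0 on
	 * success, -1 with errno set on failure (e.g. EINVAL if the source
	 * is not an SEV guest).
	 */
	static int sev_move_enc_context(int dst_vm_fd, int src_vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
			.args = { src_vm_fd },
		};

		return ioctl(dst_vm_fd, KVM_ENABLE_CAP, &cap);
	}

On success the source VM is marked dead and can only be torn down; further ioctls against it fail with -EIO.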
@@ -6911,6 +6911,20 @@ MAP_SHARED mmap will result in an -EINVAL return.
When enabled the VMM may make use of the ``KVM_ARM_MTE_COPY_TAGS`` ioctl to
perform a bulk copy of tags to/from the guest.

7.29 KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM
-------------------------------------

Architectures: x86 SEV enabled
Type: vm
Parameters: args[0] is the fd of the source vm
Returns: 0 on success

This capability enables userspace to migrate the encryption context from the VM
indicated by the fd to the VM this is called on.

This is intended to support intra-host migration of VMs between userspace VMMs,
upgrading the VMM process without interrupting the guest.

8. Other capabilities.
======================

@@ -1476,6 +1476,7 @@ struct kvm_x86_ops {
int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);

int (*get_msr_feature)(struct kvm_msr_entry *entry);

@@ -120,16 +120,26 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
return true;
}

static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
{
enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
return misc_cg_try_charge(type, sev->misc_cg, 1);
}

static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
{
enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
misc_cg_uncharge(type, sev->misc_cg, 1);
}

static int sev_asid_new(struct kvm_sev_info *sev)
{
int asid, min_asid, max_asid, ret;
bool retry = true;
enum misc_res_type type;

type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
WARN_ON(sev->misc_cg);
sev->misc_cg = get_current_misc_cg();
ret = misc_cg_try_charge(type, sev->misc_cg, 1);
ret = sev_misc_cg_try_charge(sev);
if (ret) {
put_misc_cg(sev->misc_cg);
sev->misc_cg = NULL;

@@ -162,7 +172,7 @@ again:

return asid;
e_uncharge:
misc_cg_uncharge(type, sev->misc_cg, 1);
sev_misc_cg_uncharge(sev);
put_misc_cg(sev->misc_cg);
sev->misc_cg = NULL;
return ret;

@@ -179,7 +189,6 @@ static void sev_asid_free(struct kvm_sev_info *sev)
{
struct svm_cpu_data *sd;
int cpu;
enum misc_res_type type;

mutex_lock(&sev_bitmap_lock);

@@ -192,8 +201,7 @@ static void sev_asid_free(struct kvm_sev_info *sev)

mutex_unlock(&sev_bitmap_lock);

type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
misc_cg_uncharge(type, sev->misc_cg, 1);
sev_misc_cg_uncharge(sev);
put_misc_cg(sev->misc_cg);
sev->misc_cg = NULL;
}

@@ -590,7 +598,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
* traditional VMSA as it has been built so far (in prep
* for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
*/
memcpy(svm->vmsa, save, sizeof(*save));
memcpy(svm->sev_es.vmsa, save, sizeof(*save));

return 0;
}

@@ -612,11 +620,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
* the VMSA memory content (i.e it will write the same memory region
* with the guest's key), so invalidate it first.
*/
clflush_cache_range(svm->vmsa, PAGE_SIZE);
clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);

vmsa.reserved = 0;
vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
vmsa.address = __sme_pa(svm->vmsa);
vmsa.address = __sme_pa(svm->sev_es.vmsa);
vmsa.len = PAGE_SIZE;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
if (ret)

@@ -1536,6 +1544,204 @@ static bool cmd_allowed_from_miror(u32 cmd_id)
return false;
}

static int sev_lock_for_migration(struct kvm *kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

/*
* Bail if this VM is already involved in a migration to avoid deadlock
* between two VMs trying to migrate to/from each other.
*/
if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
return -EBUSY;

mutex_lock(&kvm->lock);

return 0;
}

static void sev_unlock_after_migration(struct kvm *kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

mutex_unlock(&kvm->lock);
atomic_set_release(&sev->migration_in_progress, 0);
}


static int sev_lock_vcpus_for_migration(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
int i, j;

kvm_for_each_vcpu(i, vcpu, kvm) {
if (mutex_lock_killable(&vcpu->mutex))
goto out_unlock;
}

return 0;

out_unlock:
kvm_for_each_vcpu(j, vcpu, kvm) {
if (i == j)
break;

mutex_unlock(&vcpu->mutex);
}
return -EINTR;
}

static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
int i;

kvm_for_each_vcpu(i, vcpu, kvm) {
mutex_unlock(&vcpu->mutex);
}
}

static void sev_migrate_from(struct kvm_sev_info *dst,
struct kvm_sev_info *src)
{
dst->active = true;
dst->asid = src->asid;
dst->handle = src->handle;
dst->pages_locked = src->pages_locked;

src->asid = 0;
src->active = false;
src->handle = 0;
src->pages_locked = 0;

if (dst->misc_cg != src->misc_cg)
sev_misc_cg_uncharge(src);

put_misc_cg(src->misc_cg);
src->misc_cg = NULL;

INIT_LIST_HEAD(&dst->regions_list);
list_replace_init(&src->regions_list, &dst->regions_list);
}

static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
{
int i;
struct kvm_vcpu *dst_vcpu, *src_vcpu;
struct vcpu_svm *dst_svm, *src_svm;

if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
return -EINVAL;

kvm_for_each_vcpu(i, src_vcpu, src) {
if (!src_vcpu->arch.guest_state_protected)
return -EINVAL;
}

kvm_for_each_vcpu(i, src_vcpu, src) {
src_svm = to_svm(src_vcpu);
dst_vcpu = kvm_get_vcpu(dst, i);
dst_svm = to_svm(dst_vcpu);

/*
* Transfer VMSA and GHCB state to the destination. Nullify and
* clear source fields as appropriate, the state now belongs to
* the destination.
*/
memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
dst_vcpu->arch.guest_state_protected = true;

memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
src_vcpu->arch.guest_state_protected = false;
}
to_kvm_svm(src)->sev_info.es_active = false;
to_kvm_svm(dst)->sev_info.es_active = true;

return 0;
}

int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
{
struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_info *src_sev;
struct file *source_kvm_file;
struct kvm *source_kvm;
int ret;

ret = sev_lock_for_migration(kvm);
if (ret)
return ret;

if (sev_guest(kvm)) {
ret = -EINVAL;
goto out_unlock;
}

source_kvm_file = fget(source_fd);
if (!file_is_kvm(source_kvm_file)) {
ret = -EBADF;
goto out_fput;
}

source_kvm = source_kvm_file->private_data;
ret = sev_lock_for_migration(source_kvm);
if (ret)
goto out_fput;

if (!sev_guest(source_kvm)) {
ret = -EINVAL;
goto out_source;
}

src_sev = &to_kvm_svm(source_kvm)->sev_info;
dst_sev->misc_cg = get_current_misc_cg();
if (dst_sev->misc_cg != src_sev->misc_cg) {
ret = sev_misc_cg_try_charge(dst_sev);
if (ret)
goto out_dst_put_cgroup;
}

ret = sev_lock_vcpus_for_migration(kvm);
if (ret)
goto out_dst_cgroup;
ret = sev_lock_vcpus_for_migration(source_kvm);
if (ret)
goto out_dst_vcpu;

if (sev_es_guest(source_kvm)) {
ret = sev_es_migrate_from(kvm, source_kvm);
if (ret)
goto out_source_vcpu;
}
sev_migrate_from(dst_sev, src_sev);
kvm_vm_dead(source_kvm);
ret = 0;

out_source_vcpu:
sev_unlock_vcpus_for_migration(source_kvm);
out_dst_vcpu:
sev_unlock_vcpus_for_migration(kvm);
out_dst_cgroup:
if (ret < 0) {
sev_misc_cg_uncharge(dst_sev);
out_dst_put_cgroup:
put_misc_cg(dst_sev->misc_cg);
dst_sev->misc_cg = NULL;
}
out_source:
sev_unlock_after_migration(source_kvm);
out_fput:
if (source_kvm_file)
fput(source_kvm_file);
out_unlock:
sev_unlock_after_migration(kvm);
return ret;
}

int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;

@@ -2038,16 +2244,16 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
svm = to_svm(vcpu);

if (vcpu->arch.guest_state_protected)
sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
__free_page(virt_to_page(svm->vmsa));
sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
__free_page(virt_to_page(svm->sev_es.vmsa));

if (svm->ghcb_sa_free)
kfree(svm->ghcb_sa);
if (svm->sev_es.ghcb_sa_free)
kfree(svm->sev_es.ghcb_sa);
}

static void dump_ghcb(struct vcpu_svm *svm)
{
struct ghcb *ghcb = svm->ghcb;
struct ghcb *ghcb = svm->sev_es.ghcb;
unsigned int nbits;

/* Re-use the dump_invalid_vmcb module parameter */

@@ -2073,7 +2279,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
struct ghcb *ghcb = svm->ghcb;
struct ghcb *ghcb = svm->sev_es.ghcb;

/*
* The GHCB protocol so far allows for the following data

@@ -2093,7 +2299,7 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct kvm_vcpu *vcpu = &svm->vcpu;
struct ghcb *ghcb = svm->ghcb;
struct ghcb *ghcb = svm->sev_es.ghcb;
u64 exit_code;

/*

@@ -2140,7 +2346,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
struct ghcb *ghcb;
u64 exit_code = 0;

ghcb = svm->ghcb;
ghcb = svm->sev_es.ghcb;

/* Only GHCB Usage code 0 is supported */
if (ghcb->ghcb_usage)

@@ -2258,33 +2464,34 @@ vmgexit_err:

void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
if (!svm->ghcb)
if (!svm->sev_es.ghcb)
return;

if (svm->ghcb_sa_free) {
if (svm->sev_es.ghcb_sa_free) {
/*
* The scratch area lives outside the GHCB, so there is a
* buffer that, depending on the operation performed, may
* need to be synced, then freed.
*/
if (svm->ghcb_sa_sync) {
if (svm->sev_es.ghcb_sa_sync) {
kvm_write_guest(svm->vcpu.kvm,
ghcb_get_sw_scratch(svm->ghcb),
svm->ghcb_sa, svm->ghcb_sa_len);
svm->ghcb_sa_sync = false;
ghcb_get_sw_scratch(svm->sev_es.ghcb),
svm->sev_es.ghcb_sa,
svm->sev_es.ghcb_sa_len);
svm->sev_es.ghcb_sa_sync = false;
}

kfree(svm->ghcb_sa);
svm->ghcb_sa = NULL;
svm->ghcb_sa_free = false;
kfree(svm->sev_es.ghcb_sa);
svm->sev_es.ghcb_sa = NULL;
svm->sev_es.ghcb_sa_free = false;
}

trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);

sev_es_sync_to_ghcb(svm);

kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
svm->ghcb = NULL;
kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
svm->sev_es.ghcb = NULL;
}

void pre_sev_run(struct vcpu_svm *svm, int cpu)

@@ -2314,7 +2521,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct ghcb *ghcb = svm->ghcb;
struct ghcb *ghcb = svm->sev_es.ghcb;
u64 ghcb_scratch_beg, ghcb_scratch_end;
u64 scratch_gpa_beg, scratch_gpa_end;
void *scratch_va;

@@ -2350,7 +2557,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
return false;
}

scratch_va = (void *)svm->ghcb;
scratch_va = (void *)svm->sev_es.ghcb;
scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
} else {
/*

@@ -2380,12 +2587,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
* the vCPU next time (i.e. a read was requested so the data
* must be written back to the guest memory).
*/
svm->ghcb_sa_sync = sync;
svm->ghcb_sa_free = true;
svm->sev_es.ghcb_sa_sync = sync;
svm->sev_es.ghcb_sa_free = true;
}

svm->ghcb_sa = scratch_va;
svm->ghcb_sa_len = len;
svm->sev_es.ghcb_sa = scratch_va;
svm->sev_es.ghcb_sa_len = len;

return true;
}

@@ -2504,15 +2711,15 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
return -EINVAL;
}

if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
/* Unable to map GHCB from guest */
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
ghcb_gpa);
return -EINVAL;
}

svm->ghcb = svm->ghcb_map.hva;
ghcb = svm->ghcb_map.hva;
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
ghcb = svm->sev_es.ghcb_map.hva;

trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);

@@ -2535,7 +2742,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = kvm_sev_es_mmio_read(vcpu,
control->exit_info_1,
control->exit_info_2,
svm->ghcb_sa);
svm->sev_es.ghcb_sa);
break;
case SVM_VMGEXIT_MMIO_WRITE:
if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))

@@ -2544,7 +2751,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
ret = kvm_sev_es_mmio_write(vcpu,
control->exit_info_1,
control->exit_info_2,
svm->ghcb_sa);
svm->sev_es.ghcb_sa);
break;
case SVM_VMGEXIT_NMI_COMPLETE:
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);

@@ -2604,7 +2811,8 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
if (!setup_vmgexit_scratch(svm, in, bytes))
return -EINVAL;

return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->ghcb_sa, count, in);
return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
count, in);
}

void sev_es_init_vmcb(struct vcpu_svm *svm)

@@ -2619,7 +2827,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
* VMCB page. Do not include the encryption mask on the VMSA physical
* address since hardware will access it using the guest key.
*/
svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);

/* Can't intercept CR register access, HV can't modify CR registers */
svm_clr_intercept(svm, INTERCEPT_CR0_READ);

@@ -2691,8 +2899,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
struct vcpu_svm *svm = to_svm(vcpu);

/* First SIPI: Use the values as initially set by the VMM */
if (!svm->received_first_sipi) {
svm->received_first_sipi = true;
if (!svm->sev_es.received_first_sipi) {
svm->sev_es.received_first_sipi = true;
return;
}

@@ -2701,8 +2909,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
* the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
* non-zero value.
*/
if (!svm->ghcb)
if (!svm->sev_es.ghcb)
return;

ghcb_set_sw_exit_info_2(svm->ghcb, 1);
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
}

@@ -1452,7 +1452,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm_switch_vmcb(svm, &svm->vmcb01);

if (vmsa_page)
svm->vmsa = page_address(vmsa_page);
svm->sev_es.vmsa = page_address(vmsa_page);

svm->guest_state_loaded = false;

@@ -2835,11 +2835,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
return kvm_complete_insn_gp(vcpu, err);

ghcb_set_sw_exit_info_1(svm->ghcb, 1);
ghcb_set_sw_exit_info_2(svm->ghcb,
ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
X86_TRAP_GP |
SVM_EVTINJ_TYPE_EXEPT |
SVM_EVTINJ_VALID);

@@ -4701,6 +4701,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.mem_enc_unreg_region = svm_unregister_enc_region,

.vm_copy_enc_context_from = svm_vm_copy_asid_from,
.vm_move_enc_context_from = svm_vm_migrate_from,

.can_emulate_instruction = svm_can_emulate_instruction,

@@ -80,6 +80,7 @@ struct kvm_sev_info {
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
struct kvm *enc_context_owner; /* Owner of copied encryption context */
struct misc_cg *misc_cg; /* For misc cgroup accounting */
atomic_t migration_in_progress;
};

struct kvm_svm {

@@ -123,6 +124,20 @@ struct svm_nested_state {
bool initialized;
};

struct vcpu_sev_es_state {
/* SEV-ES support */
struct vmcb_save_area *vmsa;
struct ghcb *ghcb;
struct kvm_host_map ghcb_map;
bool received_first_sipi;

/* SEV-ES scratch area support */
void *ghcb_sa;
u32 ghcb_sa_len;
bool ghcb_sa_sync;
bool ghcb_sa_free;
};

struct vcpu_svm {
struct kvm_vcpu vcpu;
/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */

@@ -186,17 +201,7 @@ struct vcpu_svm {
DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
} shadow_msr_intercept;

/* SEV-ES support */
struct vmcb_save_area *vmsa;
struct ghcb *ghcb;
struct kvm_host_map ghcb_map;
bool received_first_sipi;

/* SEV-ES scratch area support */
void *ghcb_sa;
u32 ghcb_sa_len;
bool ghcb_sa_sync;
bool ghcb_sa_free;
struct vcpu_sev_es_state sev_es;

bool guest_state_loaded;
};

@@ -558,6 +563,7 @@ int svm_register_enc_region(struct kvm *kvm,
int svm_unregister_enc_region(struct kvm *kvm,
struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);

@@ -5728,6 +5728,12 @@ split_irqchip_unlock:
if (kvm_x86_ops.vm_copy_enc_context_from)
r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]);
return r;
case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
r = -EINVAL;
if (kvm_x86_ops.vm_move_enc_context_from)
r = kvm_x86_ops.vm_move_enc_context_from(
kvm, cap->args[0]);
return r;
case KVM_CAP_EXIT_HYPERCALL:
if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
r = -EINVAL;

@@ -9552,7 +9558,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}

if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_VM_BUGGED, vcpu)) {
if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
r = -EIO;
goto out;
}

@@ -150,7 +150,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
#define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE 8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \

@@ -617,6 +617,7 @@ struct kvm {
unsigned int max_halt_poll_ns;
u32 dirty_ring_size;
bool vm_bugged;
bool vm_dead;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
struct notifier_block pm_notifier;

@@ -650,12 +651,19 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_dead(struct kvm *kvm)
{
kvm->vm_dead = true;
kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
}

static inline void kvm_vm_bugged(struct kvm *kvm)
{
kvm->vm_bugged = true;
kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED);
kvm_vm_dead(kvm);
}

#define KVM_BUG(cond, kvm, fmt...) \
({ \
int __ret = (cond); \

@@ -1130,6 +1130,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_BINARY_STATS_FD 203
#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
#define KVM_CAP_ARM_MTE 205
#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206

#ifdef KVM_CAP_IRQ_ROUTING

@@ -73,7 +73,8 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test

@@ -82,6 +82,7 @@ struct vm_guest_mode_params {
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);

@@ -46,4 +46,6 @@ static inline bool cpu_has_svm(void)
return ecx & CPUID_SVM;
}

int open_sev_dev_path_or_exit(void);

#endif /* SELFTEST_KVM_SVM_UTILS_H */

@@ -31,6 +31,19 @@ static void *align(void *x, size_t size)
return (void *) (((size_t) x + mask) & ~mask);
}

int open_path_or_exit(const char *path, int flags)
{
int fd;

fd = open(path, flags);
if (fd < 0) {
print_skip("%s not available (errno: %d)", path, errno);
exit(KSFT_SKIP);
}

return fd;
}

/*
* Open KVM_DEV_PATH if available, otherwise exit the entire program.
*

@@ -42,16 +55,7 @@ static void *align(void *x, size_t size)
*/
static int _open_kvm_dev_path_or_exit(int flags)
{
int fd;

fd = open(KVM_DEV_PATH, flags);
if (fd < 0) {
print_skip("%s not available, is KVM loaded? (errno: %d)",
KVM_DEV_PATH, errno);
exit(KSFT_SKIP);
}

return fd;
return open_path_or_exit(KVM_DEV_PATH, flags);
}

int open_kvm_dev_path_or_exit(void)

@@ -13,6 +13,8 @@
#include "processor.h"
#include "svm_util.h"

#define SEV_DEV_PATH "/dev/sev"

struct gpr64_regs guest_regs;
u64 rflags;

@@ -172,3 +174,14 @@ void nested_svm_check_supported(void)
exit(KSFT_SKIP);
}
}

/*
* Open SEV_DEV_PATH if available, otherwise exit the entire program.
*
* Return:
* The opened file descriptor of /dev/sev.
*/
int open_sev_dev_path_or_exit(void)
{
return open_path_or_exit(SEV_DEV_PATH, 0);
}

@@ -0,0 +1,203 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm.h>
#include <linux/psp-sev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "kselftest.h"
#include "../lib/kvm_util_internal.h"

#define SEV_POLICY_ES 0b100

#define NR_MIGRATE_TEST_VCPUS 4
#define NR_MIGRATE_TEST_VMS 3
#define NR_LOCK_TESTING_THREADS 3
#define NR_LOCK_TESTING_ITERATIONS 10000

static void sev_ioctl(int vm_fd, int cmd_id, void *data)
{
struct kvm_sev_cmd cmd = {
.id = cmd_id,
.data = (uint64_t)data,
.sev_fd = open_sev_dev_path_or_exit(),
};
int ret;

ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
TEST_ASSERT((ret == 0 || cmd.error == SEV_RET_SUCCESS),
"%d failed: return code: %d, errno: %d, fw error: %d",
cmd_id, ret, errno, cmd.error);
}

static struct kvm_vm *sev_vm_create(bool es)
{
struct kvm_vm *vm;
struct kvm_sev_launch_start start = { 0 };
int i;

vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
vm_vcpu_add(vm, i);
if (es)
start.policy |= SEV_POLICY_ES;
sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
if (es)
sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
return vm;
}

static struct kvm_vm *__vm_create(void)
{
struct kvm_vm *vm;
int i;

vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
vm_vcpu_add(vm, i);

return vm;
}

static int __sev_migrate_from(int dst_fd, int src_fd)
{
struct kvm_enable_cap cap = {
.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
.args = { src_fd }
};

return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
}


static void sev_migrate_from(int dst_fd, int src_fd)
{
int ret;

ret = __sev_migrate_from(dst_fd, src_fd);
TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d\n", ret, errno);
}

static void test_sev_migrate_from(bool es)
{
struct kvm_vm *src_vm;
struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
int i;

src_vm = sev_vm_create(es);
for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
dst_vms[i] = __vm_create();

/* Initial migration from the src to the first dst. */
sev_migrate_from(dst_vms[0]->fd, src_vm->fd);

for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);

/* Migrate the guest back to the original VM. */
sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);

kvm_vm_free(src_vm);
for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
kvm_vm_free(dst_vms[i]);
}

struct locking_thread_input {
struct kvm_vm *vm;
int source_fds[NR_LOCK_TESTING_THREADS];
};

static void *locking_test_thread(void *arg)
{
int i, j;
struct locking_thread_input *input = (struct locking_thread_input *)arg;

for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
j = i % NR_LOCK_TESTING_THREADS;
__sev_migrate_from(input->vm->fd, input->source_fds[j]);
}

return NULL;
}

static void test_sev_migrate_locking(void)
{
struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
pthread_t pt[NR_LOCK_TESTING_THREADS];
int i;

for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
input[i].vm = sev_vm_create(/* es= */ false);
input[0].source_fds[i] = input[i].vm->fd;
}
for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
memcpy(input[i].source_fds, input[0].source_fds,
sizeof(input[i].source_fds));

for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);

for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
pthread_join(pt[i], NULL);
}

static void test_sev_migrate_parameters(void)
{
struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
*sev_es_vm_no_vmsa;
int ret;

sev_vm = sev_vm_create(/* es= */ false);
sev_es_vm = sev_vm_create(/* es= */ true);
vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
vm_no_sev = __vm_create();
sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
vm_vcpu_add(sev_es_vm_no_vmsa, 1);


ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n",
ret, errno);

ret = __sev_migrate_from(sev_es_vm->fd, sev_vm->fd);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
ret, errno);

ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm->fd);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
ret, errno);

ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm_no_vmsa->fd);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
ret, errno);

ret = __sev_migrate_from(vm_no_vcpu->fd, vm_no_sev->fd);
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Migrations require SEV enabled. ret %d, errno: %d\n", ret,
errno);
}

int main(int argc, char *argv[])
{
test_sev_migrate_from(/* es= */ false);
test_sev_migrate_from(/* es= */ true);
test_sev_migrate_locking();
test_sev_migrate_parameters();
return 0;
}

@@ -3747,7 +3747,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_fpu *fpu = NULL;
struct kvm_sregs *kvm_sregs = NULL;

if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;

if (unlikely(_IOC_TYPE(ioctl) != KVMIO))

@@ -3957,7 +3957,7 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
void __user *argp = compat_ptr(arg);
int r;

if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;

switch (ioctl) {

@@ -4023,7 +4023,7 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
{
struct kvm_device *dev = filp->private_data;

if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged)
if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
return -EIO;

switch (ioctl) {

@@ -4345,7 +4345,7 @@ static long kvm_vm_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int r;

if (kvm->mm != current->mm || kvm->vm_bugged)
if (kvm->mm != current->mm || kvm->vm_dead)
return -EIO;
switch (ioctl) {
case KVM_CREATE_VCPU:

@@ -4556,7 +4556,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
struct kvm *kvm = filp->private_data;
int r;

if (kvm->mm != current->mm || kvm->vm_bugged)
if (kvm->mm != current->mm || kvm->vm_dead)
return -EIO;
switch (ioctl) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT