KVM: move vcpu locking to dispatcher for generic vcpu ioctls

All vcpu ioctls need to be locked, so instead of locking each one specifically,
we take the lock in the generic dispatcher.

This patch only updates generic ioctls and leaves arch-specific ioctls alone.

Signed-off-by: Avi Kivity <avi@redhat.com>
Avi Kivity 2010-05-13 11:25:04 +03:00
parent 1683b2416e
commit 2122ff5eab
7 changed files with 17 additions and 95 deletions
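
For readers scanning the hunks below, the net effect is easiest to see in the generic dispatcher itself, kvm_vcpu_ioctl() in virt/kvm/kvm_main.c: the vcpu lock is now taken once before the ioctl switch and released once on the way out, with the two asynchronous ioctls handled before the lock is taken at all. What follows is a condensed, illustrative sketch of that shape, not the verbatim kernel source; the individual ioctl cases and the user-space copying are elided.

/*
 * Condensed sketch of kvm_vcpu_ioctl() as this patch leaves it; the real
 * dispatcher handles many more cases and copies data to/from user space.
 */
static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        long r = -EINVAL;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;

#if defined(CONFIG_S390) || defined(CONFIG_PPC)
        /*
         * KVM_S390_INTERRUPT and KVM_INTERRUPT are asynchronous to vcpu
         * execution, so vcpu_load() would break them; they go straight to
         * the arch code without taking the lock.
         */
        if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
                return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

        vcpu_load(vcpu);                /* lock once for every generic vcpu ioctl */
        switch (ioctl) {
        case KVM_RUN:
        case KVM_GET_MP_STATE:
                /*
                 * Generic ioctls run here with the vcpu already loaded, so
                 * handlers such as kvm_arch_vcpu_ioctl_run() and
                 * kvm_arch_vcpu_ioctl_get_mpstate() no longer call
                 * vcpu_load()/vcpu_put() themselves (handler calls elided).
                 */
                break;
        default:
                /* arch-specific ioctls still expect to do their own locking */
                vcpu_put(vcpu);
                r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
                vcpu_load(vcpu);
        }
        vcpu_put(vcpu);                 /* unlock once on the common exit path */
        return r;
}

The default branch drops and re-takes the lock around kvm_arch_vcpu_ioctl() so that arch-specific ioctls, which this patch deliberately leaves alone, see exactly the locking behaviour they did before.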

@@ -725,8 +725,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         int r;
         sigset_t sigsaved;
-        vcpu_load(vcpu);
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -748,7 +746,6 @@ out:
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-        vcpu_put(vcpu);
         return r;
 }
@@ -883,8 +880,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
         int i;
-        vcpu_load(vcpu);
         for (i = 0; i < 16; i++) {
                 vpd->vgr[i] = regs->vpd.vgr[i];
                 vpd->vbgr[i] = regs->vpd.vbgr[i];
@@ -931,8 +926,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
         set_bit(KVM_REQ_RESUME, &vcpu->requests);
-        vcpu_put(vcpu);
         return 0;
 }
@@ -1967,9 +1960,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        vcpu_load(vcpu);
         mp_state->mp_state = vcpu->arch.mp_state;
-        vcpu_put(vcpu);
         return 0;
 }
@@ -2000,10 +1991,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
         int r = 0;
-        vcpu_load(vcpu);
         vcpu->arch.mp_state = mp_state->mp_state;
         if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
                 r = vcpu_reset(vcpu);
-        vcpu_put(vcpu);
         return r;
 }

@@ -1047,8 +1047,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
         int i;
-        vcpu_load(vcpu);
         regs->pc = kvmppc_get_pc(vcpu);
         regs->cr = kvmppc_get_cr(vcpu);
         regs->ctr = kvmppc_get_ctr(vcpu);
@@ -1069,8 +1067,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
-        vcpu_put(vcpu);
         return 0;
 }
@@ -1078,8 +1074,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
         int i;
-        vcpu_load(vcpu);
         kvmppc_set_pc(vcpu, regs->pc);
         kvmppc_set_cr(vcpu, regs->cr);
         kvmppc_set_ctr(vcpu, regs->ctr);
@@ -1099,8 +1093,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
-        vcpu_put(vcpu);
         return 0;
 }
@@ -1110,8 +1102,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
         struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
         int i;
-        vcpu_load(vcpu);
         sregs->pvr = vcpu->arch.pvr;
         sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -1131,8 +1121,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                 }
         }
-        vcpu_put(vcpu);
         return 0;
 }
@@ -1142,8 +1130,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
         int i;
-        vcpu_load(vcpu);
         kvmppc_set_pvr(vcpu, sregs->pvr);
         vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -1171,8 +1157,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         /* Flush the MMU after messing with the segments */
         kvmppc_mmu_pte_flush(vcpu, 0, 0);
-        vcpu_put(vcpu);
         return 0;
 }

@@ -485,8 +485,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
         int i;
-        vcpu_load(vcpu);
         regs->pc = vcpu->arch.pc;
         regs->cr = kvmppc_get_cr(vcpu);
         regs->ctr = vcpu->arch.ctr;
@@ -507,8 +505,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
-        vcpu_put(vcpu);
         return 0;
 }
@@ -516,8 +512,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
         int i;
-        vcpu_load(vcpu);
         vcpu->arch.pc = regs->pc;
         kvmppc_set_cr(vcpu, regs->cr);
         vcpu->arch.ctr = regs->ctr;
@@ -537,8 +531,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
-        vcpu_put(vcpu);
         return 0;
 }
@@ -569,9 +561,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 {
         int r;
-        vcpu_load(vcpu);
         r = kvmppc_core_vcpu_translate(vcpu, tr);
-        vcpu_put(vcpu);
         return r;
 }

@@ -423,8 +423,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         int r;
         sigset_t sigsaved;
-        vcpu_load(vcpu);
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -456,8 +454,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-        vcpu_put(vcpu);
         return r;
 }

@@ -371,55 +371,43 @@ static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-        vcpu_load(vcpu);
         memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-        vcpu_load(vcpu);
         memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
-        vcpu_load(vcpu);
         memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                   struct kvm_sregs *sregs)
 {
-        vcpu_load(vcpu);
         memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-        vcpu_load(vcpu);
         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
         vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-        vcpu_load(vcpu);
         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
-        vcpu_put(vcpu);
         return 0;
 }
@@ -498,8 +486,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         int rc;
         sigset_t sigsaved;
-        vcpu_load(vcpu);
 rerun_vcpu:
         if (vcpu->requests)
                 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
@@ -568,8 +554,6 @@ rerun_vcpu:
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-        vcpu_put(vcpu);
         vcpu->stat.exit_userspace++;
         return rc;
 }

@@ -4773,8 +4773,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         int r;
         sigset_t sigsaved;
-        vcpu_load(vcpu);
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -4815,14 +4813,11 @@ out:
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-        vcpu_put(vcpu);
         return r;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-        vcpu_load(vcpu);
         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
@@ -4845,15 +4840,11 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         regs->rip = kvm_rip_read(vcpu);
         regs->rflags = kvm_get_rflags(vcpu);
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-        vcpu_load(vcpu);
         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
@@ -4878,8 +4869,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         vcpu->arch.exception.pending = false;
-        vcpu_put(vcpu);
         return 0;
 }
@@ -4898,8 +4887,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 {
         struct desc_ptr dt;
-        vcpu_load(vcpu);
         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
@@ -4931,26 +4918,20 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                 set_bit(vcpu->arch.interrupt.nr,
                         (unsigned long *)sregs->interrupt_bitmap);
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        vcpu_load(vcpu);
         mp_state->mp_state = vcpu->arch.mp_state;
-        vcpu_put(vcpu);
         return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        vcpu_load(vcpu);
         vcpu->arch.mp_state = mp_state->mp_state;
-        vcpu_put(vcpu);
         return 0;
 }
@@ -4996,8 +4977,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         int pending_vec, max_bits;
         struct desc_ptr dt;
-        vcpu_load(vcpu);
         dt.size = sregs->idt.limit;
         dt.address = sregs->idt.base;
         kvm_x86_ops->set_idt(vcpu, &dt);
@@ -5057,8 +5036,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
             !is_protmode(vcpu))
                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-        vcpu_put(vcpu);
         return 0;
 }
@@ -5068,12 +5045,10 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
         unsigned long rflags;
         int i, r;
-        vcpu_load(vcpu);
         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
                 r = -EBUSY;
                 if (vcpu->arch.exception.pending)
-                        goto unlock_out;
+                        goto out;
                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
                         kvm_queue_exception(vcpu, DB_VECTOR);
                 else
@@ -5115,8 +5090,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
         r = 0;
-unlock_out:
-        vcpu_put(vcpu);
+out:
         return r;
 }
@@ -5152,7 +5126,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
         gpa_t gpa;
         int idx;
-        vcpu_load(vcpu);
         idx = srcu_read_lock(&vcpu->kvm->srcu);
         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
         srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -5160,7 +5133,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
         tr->valid = gpa != UNMAPPED_GVA;
         tr->writeable = 1;
         tr->usermode = 0;
-        vcpu_put(vcpu);
         return 0;
 }
@@ -5169,8 +5141,6 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
-        vcpu_load(vcpu);
         memcpy(fpu->fpr, fxsave->st_space, 128);
         fpu->fcw = fxsave->cwd;
         fpu->fsw = fxsave->swd;
@@ -5180,8 +5150,6 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
         fpu->last_dp = fxsave->rdp;
         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
-        vcpu_put(vcpu);
         return 0;
 }
@@ -5189,8 +5157,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
         struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
-        vcpu_load(vcpu);
         memcpy(fxsave->st_space, fpu->fpr, 128);
         fxsave->cwd = fpu->fcw;
         fxsave->swd = fpu->fsw;
@@ -5200,8 +5166,6 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
         fxsave->rdp = fpu->last_dp;
         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
-        vcpu_put(vcpu);
         return 0;
 }

@@ -1392,6 +1392,18 @@ static long kvm_vcpu_ioctl(struct file *filp,
         if (vcpu->kvm->mm != current->mm)
                 return -EIO;
+#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+        /*
+         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
+         * so vcpu_load() would break it.
+         */
+        if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
+                return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
+#endif
+        vcpu_load(vcpu);
         switch (ioctl) {
         case KVM_RUN:
                 r = -EINVAL;
@@ -1566,9 +1578,12 @@ out_free2:
                 break;
         }
         default:
+                vcpu_put(vcpu);
                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
+                vcpu_load(vcpu);
         }
 out:
+        vcpu_put(vcpu);
         kfree(fpu);
         kfree(kvm_sregs);
         return r;