From 5dbeddc60244b1c361ac7a0e332fa83806df576d Mon Sep 17 00:00:00 2001
From: Eduardo Habkost <ehabkost@redhat.com>
Date: Wed, 2 Sep 2009 16:15:07 -0300
Subject: [PATCH 1/2] add nmi support to svm

Message-id: <20090826140150.GG30093@redhat.com>
Author: Gleb Natapov <gleb@redhat.com>
Patchwork-id: 3314
O-Subject: [PATCH] add nmi support to svm
Bugzilla: 502543 503322 492290
CVE:
RH-Acked-by: Avi Kivity <avi@redhat.com>
RH-Acked-by: Chris Lalancette <clalance@redhat.com>
RH-Acked-by: Andrea Arcangeli <aarcange@redhat.com>

Allow NMI injection on SVM.

BZ: 502543, 503322, 492290
Upstream status: functionality is there, code is different, backport is
impossible

Signed-off-by: Gleb Natapov <gleb@redhat.com>
--
			Gleb.

Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |    4 ++
 arch/x86/kvm/svm.c              |   82 ++++++++++++++++++++++++++++++++++++--
 2 files changed, 81 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f6075e3..9df8154 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -258,6 +258,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr3;
 	unsigned long cr4;
 	unsigned long cr8;
+	u32 hflags;
 	u64 pdptrs[4]; /* pae */
 	u64 shadow_efer;
 	u64 apic_base;
@@ -741,6 +742,9 @@ enum {
 	TASK_SWITCH_GATE = 3,
 };
 
+#define HF_NMI_MASK		(1 << 3)
+#define HF_IRET_MASK		(1 << 4)
+
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c7dcbd1..db458c8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1051,6 +1051,17 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
+static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (svm->vcpu.guest_debug.singlestep) {
+		svm->vcpu.guest_debug.singlestep = false;
+		svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+	} else
+		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
+
+	return 1;
+}
+
 static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	int er;
@@ -1190,6 +1201,14 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	++svm->vcpu.stat.nmi_window_exits;
+	svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
+	svm->vcpu.arch.hflags |= HF_IRET_MASK;
+	return 1;
+}
+
 static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
@@ -1437,6 +1456,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
 	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
 	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
@@ -1448,6 +1468,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_VINTR]			= interrupt_window_interception,
 	/* [SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception, */
 	[SVM_EXIT_CPUID]			= cpuid_interception,
+	[SVM_EXIT_IRET]				= iret_interception,
 	[SVM_EXIT_INVD]				= emulate_on_interception,
 	[SVM_EXIT_HLT]				= halt_interception,
 	[SVM_EXIT_INVLPG]			= invlpg_interception,
@@ -1477,6 +1498,9 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
 		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
 
+	if (svm->vcpu.arch.hflags & HF_IRET_MASK)
+		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
+
 	if (npt_enabled) {
 		int mmu_reload = 0;
 		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -1545,6 +1569,39 @@ static void pre_svm_run(struct vcpu_svm *svm)
 		new_asid(svm, svm_data);
 }
 
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
+	    == HF_NMI_MASK)
+		return; /* IRET will cause a vm exit */
+
+	/* Something prevents NMI from been injected. Single step over
+	   possible problem (IRET or exception injection or interrupt
+	   shadow) */
+	svm->vcpu.guest_debug.singlestep = true;
+	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+	svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
+}
+
+static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *vmcb = svm->vmcb;
+	return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+		!(svm->vcpu.arch.hflags & HF_NMI_MASK);
+}
+
+static void svm_inject_nmi(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+	vcpu->arch.hflags |= HF_NMI_MASK;
+	svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
+	++vcpu->stat.nmi_injections;
+}
 
 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
@@ -1594,15 +1651,30 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
 	struct vmcb *vmcb = svm->vmcb;
 	int intr_vector = -1;
 
-	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
-	    ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
-		intr_vector = vmcb->control.exit_int_info &
-			      SVM_EVTINJ_VEC_MASK;
+	if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID)) {
+		int ev = vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK;
+		switch (ev) {
+		case SVM_EVTINJ_TYPE_INTR:
+			intr_vector = vmcb->control.exit_int_info &
+				SVM_EVTINJ_VEC_MASK;
+			svm_inject_irq(svm, intr_vector);
+			break;
+		case SVM_EVTINJ_TYPE_NMI:
+			svm_inject_nmi(vcpu);
+			break;
+		}
 		vmcb->control.exit_int_info = 0;
-		svm_inject_irq(svm, intr_vector);
 		goto out;
 	}
 
+	if (vcpu->arch.nmi_pending) {
+		if (svm_nmi_allowed(vcpu)) {
+			svm_inject_nmi(vcpu);
+			vcpu->arch.nmi_pending = false;
+		} else
+			enable_nmi_window(vcpu);
+	}
+
 	if (vmcb->control.int_ctl & V_IRQ_MASK)
 		goto out;
 
-- 
1.6.3.rc4.29.g8146
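
P.S. For reviewers who want to poke at the NMI-window logic outside the kernel,
below is a minimal user-space sketch of the state machine this patch introduces.
It only mirrors the flag handling (HF_NMI_MASK, HF_IRET_MASK, the interrupt-shadow
check and the IRET-driven window); struct model_vcpu, the helper names
(nmi_allowed, inject_nmi, guest_iret, open_nmi_window) and the scenario in main()
are invented for illustration and are not part of the patch or of KVM.

/*
 * Illustrative user-space model of the NMI-window state machine added by
 * this patch.  Not kernel code: struct model_vcpu, these helper names and
 * the walk-through in main() are made up; only the flag values and the
 * decision logic mirror the patch above.
 */
#include <stdbool.h>
#include <stdio.h>

#define HF_NMI_MASK	(1 << 3)	/* NMI in flight until the guest IRETs */
#define HF_IRET_MASK	(1 << 4)	/* guest executed IRET */

struct model_vcpu {
	unsigned int hflags;
	bool int_shadow;	/* models SVM_INTERRUPT_SHADOW_MASK */
	bool nmi_pending;
	bool singlestep;	/* models the TF/RF + #DB intercept trick */
};

/* Mirrors svm_nmi_allowed(): no interrupt shadow, no NMI already in flight. */
static bool nmi_allowed(const struct model_vcpu *v)
{
	return !v->int_shadow && !(v->hflags & HF_NMI_MASK);
}

/* Mirrors svm_inject_nmi(): block further NMIs until the guest IRETs. */
static void inject_nmi(struct model_vcpu *v)
{
	v->hflags |= HF_NMI_MASK;
	v->nmi_pending = false;
	printf("NMI injected\n");
}

/* Mirrors iret_interception() plus the flag clearing in handle_exit(). */
static void guest_iret(struct model_vcpu *v)
{
	v->hflags |= HF_IRET_MASK;
	v->hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
	printf("IRET: NMI window is open again\n");
}

/* Mirrors enable_nmi_window(): either the IRET intercept will give us a
   vm exit, or we have to single-step over whatever blocks the NMI. */
static void open_nmi_window(struct model_vcpu *v)
{
	if ((v->hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
		return;			/* IRET will cause a vm exit */
	v->singlestep = true;		/* single-step past the blocker */
}

int main(void)
{
	struct model_vcpu v = { .hflags = 0, .nmi_pending = true };

	if (nmi_allowed(&v))
		inject_nmi(&v);		/* first NMI goes in immediately */

	v.nmi_pending = true;		/* second NMI arrives while the first
					   handler is still running */
	if (nmi_allowed(&v))
		inject_nmi(&v);
	else
		open_nmi_window(&v);	/* just waits for the IRET vm exit */

	guest_iret(&v);			/* guest leaves its NMI handler */
	if (v.nmi_pending && nmi_allowed(&v))
		inject_nmi(&v);		/* pending NMI is delivered now */
	return 0;
}

The point of the model is the ordering: a second NMI that arrives while the
first handler runs stays pending until the guest's IRET vm exit clears
HF_NMI_MASK, which is what iret_interception() plus the flag clearing in
handle_exit() implement in the patch.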