Commit 5addc235 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: VMX: Cache vmcs.EXIT_QUALIFICATION using arch avail_reg flags



Introduce a new "extended register" type, EXIT_INFO_1 (to pair with the
nomenclature in .get_exit_info()), and use it to cache VMX's
vmcs.EXIT_QUALIFICATION.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200415203454.8296-5-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ec0241f3
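For orientation before the diff: the change introduces a lazy, per-exit cache for vmcs.EXIT_QUALIFICATION. The first call to vmx_get_exit_qual() after a VM-exit performs the VMREAD and marks VCPU_EXREG_EXIT_INFO_1 available in the arch register-cache bitmap; later callers in the same exit path reuse the cached value, and vmx_register_cache_reset() clears the bit so the next exit re-reads the field. Below is a minimal, standalone sketch of that pattern in plain C; the struct, the bit index, and read_hw_exit_qual() are stand-ins invented for illustration, not the kernel's types or API (the real helper appears in the vmx.h hunk at the end of the diff).

#include <stdio.h>

/* Stand-in for the arch register-availability bitmap (VCPU_EXREG_*). */
enum { EXREG_EXIT_INFO_1 = 0 };

struct vcpu {
	unsigned long regs_avail;         /* one bit per cached "register" */
	unsigned long exit_qualification; /* cached copy of the VMCS field */
};

/* Made-up "expensive" source, standing in for vmcs_readl(EXIT_QUALIFICATION). */
static unsigned long read_hw_exit_qual(void)
{
	puts("VMREAD");   /* visible side effect, to show the read happens once */
	return 0x42;
}

static unsigned long get_exit_qual(struct vcpu *vcpu)
{
	if (!(vcpu->regs_avail & (1UL << EXREG_EXIT_INFO_1))) {
		vcpu->regs_avail |= 1UL << EXREG_EXIT_INFO_1;
		vcpu->exit_qualification = read_hw_exit_qual();
	}
	return vcpu->exit_qualification;
}

/* Analogue of vmx_register_cache_reset(): run once per VM-exit. */
static void register_cache_reset(struct vcpu *vcpu)
{
	vcpu->regs_avail &= ~(1UL << EXREG_EXIT_INFO_1);
}

int main(void)
{
	struct vcpu vcpu = { 0 };

	register_cache_reset(&vcpu);
	get_exit_qual(&vcpu);   /* prints "VMREAD" and fills the cache */
	get_exit_qual(&vcpu);   /* served from the cache, no second read */
	return 0;
}

The kernel version is the same shape: kvm_register_is_available()/kvm_register_mark_available() operate on the shared arch register cache, so EXIT_QUALIFICATION is read from the VMCS at most once per exit no matter how many handlers ask for it.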
arch/x86/include/asm/kvm_host.h +1 −0
@@ -170,6 +170,7 @@ enum kvm_reg {
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_SEGMENTS,
+	VCPU_EXREG_EXIT_INFO_1,
};

enum {
arch/x86/kvm/vmx/nested.c +10 −10
@@ -4604,7 +4604,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
	gva_t gva;
	struct x86_exception e;

-	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
				vmcs_read32(VMX_INSTRUCTION_INFO), false,
				sizeof(*vmpointer), &gva))
		return 1;
@@ -4869,7 +4869,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
						    : get_vmcs12(vcpu);
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct x86_exception e;
@@ -4955,7 +4955,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
						    : get_vmcs12(vcpu);
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct x86_exception e;
@@ -5140,7 +5140,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
-	unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qual = vmx_get_exit_qual(vcpu);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
	struct x86_exception e;
@@ -5208,7 +5208,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
-	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
			vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
@@ -5290,7 +5290,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
	/* according to the intel vmx instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
-	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
			vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
@@ -5420,7 +5420,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
fail:
	nested_vmx_vmexit(vcpu, vmx->exit_reason,
			  vmcs_read32(VM_EXIT_INTR_INFO),
-			  vmcs_readl(EXIT_QUALIFICATION));
+			  vmx_get_exit_qual(vcpu));
	return 1;
}

@@ -5471,7 +5471,7 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;
@@ -5525,7 +5525,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12)
{
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	int cr = exit_qualification & 15;
	int reg;
	unsigned long val;
@@ -5849,7 +5849,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
	}

	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-	exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qual = vmx_get_exit_qual(vcpu);

	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, exit_qual,
				vmx->idt_vectoring_info, exit_intr_info,
arch/x86/kvm/vmx/vmx.c +14 −14
@@ -4698,7 +4698,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
	}

	if (is_page_fault(intr_info)) {
-		cr2 = vmcs_readl(EXIT_QUALIFICATION);
+		cr2 = vmx_get_exit_qual(vcpu);
		/* EPT won't cause page fault directly */
		WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
		return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
@@ -4714,7 +4714,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
		return 1;
	case DB_VECTOR:
-		dr6 = vmcs_readl(EXIT_QUALIFICATION);
+		dr6 = vmx_get_exit_qual(vcpu);
		if (!(vcpu->guest_debug &
		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
			vcpu->arch.dr6 &= ~DR_TRAP_BITS;
@@ -4769,7 +4769,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
	int size, in, string;
	unsigned port;

-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
	string = (exit_qualification & 16) != 0;

	++vcpu->stat.io_exits;
@@ -4860,7 +4860,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
	int err;
	int ret;

-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
@@ -4937,7 +4937,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
	unsigned long exit_qualification;
	int dr, dr7, reg;

-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);
	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;

	/* First, if DR does not exist, trigger UD */
@@ -5050,7 +5050,7 @@ static int handle_invd(struct kvm_vcpu *vcpu)

static int handle_invlpg(struct kvm_vcpu *vcpu)
{
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);

	kvm_mmu_invlpg(vcpu, exit_qualification);
	return kvm_skip_emulated_instruction(vcpu);
@@ -5082,7 +5082,7 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
	if (likely(fasteoi)) {
-		unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+		unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
		int access_type, offset;

		access_type = exit_qualification & APIC_ACCESS_TYPE;
@@ -5103,7 +5103,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)

static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
{
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	int vector = exit_qualification & 0xff;

	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
@@ -5113,7 +5113,7 @@ static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)

static int handle_apic_write(struct kvm_vcpu *vcpu)
{
-	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
	u32 offset = exit_qualification & 0xfff;

	/* APIC-write VM exit is trap-like and thus no need to adjust IP */
@@ -5134,7 +5134,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);

-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);

	reason = (u32)exit_qualification >> 30;
	if (reason == TASK_SWITCH_GATE && idt_v) {
@@ -5184,7 +5184,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
	gpa_t gpa;
	u64 error_code;

-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);

	/*
	 * EPT violation happened while executing iret from NMI,
@@ -5444,7 +5444,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
	/* According to the Intel instruction reference, the memory operand
	 * is read even if it isn't needed (e.g., for type==all)
	 */
-	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+	if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
				vmx_instruction_info, false,
				sizeof(operand), &gva))
		return 1;
@@ -5520,7 +5520,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)

	trace_kvm_pml_full(vcpu->vcpu_id);

-	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	exit_qualification = vmx_get_exit_qual(vcpu);

	/*
	 * PML buffer FULL happened while executing iret from NMI,
@@ -5634,7 +5634,7 @@ static const int kvm_vmx_max_exit_handlers =

static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
-	*info1 = vmcs_readl(EXIT_QUALIFICATION);
+	*info1 = vmx_get_exit_qual(vcpu);
	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}

arch/x86/kvm/vmx/vmx.h +14 −1
@@ -210,6 +210,7 @@ struct vcpu_vmx {
	 */
	bool		      guest_state_loaded;

+	unsigned long         exit_qualification;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;
@@ -449,7 +450,8 @@ static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
-				  | (1 << VCPU_EXREG_CR3));
+				  | (1 << VCPU_EXREG_CR3)
+				  | (1 << VCPU_EXREG_EXIT_INFO_1));
	vcpu->arch.regs_dirty = 0;
}

@@ -493,6 +495,17 @@ static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
	return &(to_vmx(vcpu)->pi_desc);
}

+static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
+		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	}
+	return vmx->exit_qualification;
+}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);