Commit 4a632ac6 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Add separate override for MMU sync during fast CR3 switch



Add a separate "skip" override for the MMU sync; a future change to avoid
TLB flushes on nested VMX transitions may need to sync the MMU even when
the TLB flush itself is unnecessary.
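As a hedged sketch (hypothetical caller and values, not part of this
patch), such a transition would pass the two overrides independently:

	/*
	 * Hypothetical future use: TLB entries tagged for the new CR3 are
	 * known to be valid, but unsync'd shadow pages may not be, so skip
	 * the TLB flush while still requesting the MMU sync.
	 */
	kvm_mmu_new_cr3(vcpu, new_cr3, true /* skip_tlb_flush */,
			false /* skip_mmu_sync */);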

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-32-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b869855b
arch/x86/include/asm/kvm_host.h (+2 −1)
@@ -1524,7 +1524,8 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gva_t gva, hpa_t root_hpa);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+		     bool skip_mmu_sync);
 
 void kvm_configure_mmu(bool enable_tdp, int tdp_page_level);
arch/x86/kvm/mmu/mmu.c (+7 −6)
@@ -4303,7 +4303,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 
 static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			      union kvm_mmu_page_role new_role,
-			      bool skip_tlb_flush)
+			      bool skip_tlb_flush, bool skip_mmu_sync)
 {
 	if (!fast_cr3_switch(vcpu, new_cr3, new_role)) {
 		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
@@ -4318,10 +4318,10 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	 */
 	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 
-	if (!skip_tlb_flush) {
+	if (!skip_mmu_sync)
 		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+	if (!skip_tlb_flush)
 		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-	}
 
 	/*
 	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
@@ -4334,10 +4334,11 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	__clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
 }
 
-void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
+void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush,
+		     bool skip_mmu_sync)
 {
 	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
-			  skip_tlb_flush);
+			  skip_tlb_flush, skip_mmu_sync);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
 
@@ -5030,7 +5031,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
 						   execonly, level);
 
-	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+	__kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false, false);
 
 	if (new_role.as_u64 == context->mmu_role.as_u64)
 		return;
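For context, a paraphrased sketch of how the two requests are serviced
independently by vcpu_enter_guest() in this era's arch/x86/kvm/x86.c (not
part of this diff):

	if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
		kvm_mmu_sync_roots(vcpu);
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu);

Splitting the old single if-block into two independent requests is what
lets a caller skip the flush without also skipping the sync.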
arch/x86/kvm/vmx/nested.c (+1 −1)
@@ -1101,7 +1101,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 	}
 
 	if (!nested_ept)
-		kvm_mmu_new_cr3(vcpu, cr3, false);
+		kvm_mmu_new_cr3(vcpu, cr3, false, false);
 
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
arch/x86/kvm/x86.c (+1 −1)
@@ -1031,7 +1031,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
 		return 1;
 
-	kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush);
+	kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
 	vcpu->arch.cr3 = cr3;
 	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
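Note that kvm_set_cr3() passes skip_tlb_flush for both overrides, so legacy
MOV-to-CR3 behavior is unchanged. An illustrative summary of the call
patterns the new signature allows (hypothetical values, not from this
commit):

	kvm_mmu_new_cr3(vcpu, cr3, false, false); /* sync and flush */
	kvm_mmu_new_cr3(vcpu, cr3, true, true);   /* skip both, e.g. PCID no-flush CR3 write */
	kvm_mmu_new_cr3(vcpu, cr3, true, false);  /* skip the flush, still sync (future nested VMX use) */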