Commit b869855b authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Move fast_cr3_switch() side effects to __kvm_mmu_new_cr3()

Handle the side effects of a fast CR3 (PGD) switch up a level in
__kvm_mmu_new_cr3(), which is the only caller of fast_cr3_switch().

This consolidates the handling of all side effects in __kvm_mmu_new_cr3()
(where freeing the current root when KVM can't do a fast switch is
already handled), and ameliorates the pain of adding a second boolean in
a future patch to provide a separate "skip" override for the MMU sync.

Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-31-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4de1f9d4
arch/x86/kvm/mmu/mmu.c  +31 −38
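As context for the diff below, the shape of the refactor can be sketched in isolation. The following is a minimal, self-contained C sketch of the pattern this patch applies, not the kernel code itself; struct toy_vcpu, fast_switch_possible() and toy_new_cr3() are hypothetical stand-ins for the KVM vCPU state, fast_cr3_switch() and __kvm_mmu_new_cr3(). The helper becomes a pure predicate, and its sole caller absorbs every side effect, so a later "skip sync" override only has to touch one function.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the vCPU state touched by a CR3 switch. */
struct toy_vcpu {
	unsigned long root;	/* stand-in for mmu->root_hpa                */
	bool need_sync;		/* stand-in for KVM_REQ_MMU_SYNC             */
	bool need_flush;	/* stand-in for KVM_REQ_TLB_FLUSH_CURRENT    */
};

/*
 * Pure predicate, no side effects: can the new root be (re)used?  This
 * mirrors fast_cr3_switch() after the patch, which simply returns
 * !mmu_check_root() && cached_root_available().
 */
static bool fast_switch_possible(const struct toy_vcpu *v, unsigned long new_cr3)
{
	(void)v;
	return (new_cr3 & 0xfff) == 0;	/* toy validity check only */
}

/*
 * Sole caller: both the slow-path cleanup and the fast-path side effects
 * now live here, mirroring __kvm_mmu_new_cr3() after the patch.
 */
static void toy_new_cr3(struct toy_vcpu *v, unsigned long new_cr3,
			bool skip_tlb_flush)
{
	if (!fast_switch_possible(v, new_cr3)) {
		v->root = 0;	/* slow path: free the current root */
		return;
	}

	v->root = new_cr3;	/* fast path: switch to the cached root */
	if (!skip_tlb_flush) {
		v->need_sync = true;
		v->need_flush = true;
	}
}

int main(void)
{
	struct toy_vcpu v = {0};

	toy_new_cr3(&v, 0x1000, false);
	printf("root=%#lx sync=%d flush=%d\n", v.root, v.need_sync, v.need_flush);
	return 0;
}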
@@ -4284,8 +4284,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 }
 
 static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
-			    union kvm_mmu_page_role new_role,
-			    bool skip_tlb_flush)
+			    union kvm_mmu_page_role new_role)
 {
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
 
@@ -4295,50 +4294,44 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 	 * later if necessary.
 	 */
 	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
-	    mmu->root_level >= PT64_ROOT_4LEVEL) {
-		if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
-			return false;
-
-		if (cached_root_available(vcpu, new_cr3, new_role)) {
-			/*
-			 * It is possible that the cached previous root page is
-			 * obsolete because of a change in the MMU generation
-			 * number. However, changing the generation number is
-			 * accompanied by KVM_REQ_MMU_RELOAD, which will free
-			 * the root set here and allocate a new one.
-			 */
-			kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
-			if (!skip_tlb_flush) {
-				kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-				kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
-			}
-
-			/*
-			 * The last MMIO access's GVA and GPA are cached in the
-			 * VCPU. When switching to a new CR3, that GVA->GPA
-			 * mapping may no longer be valid. So clear any cached
-			 * MMIO info even when we don't need to sync the shadow
-			 * page tables.
-			 */
-			vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
-
-			__clear_sp_write_flooding_count(
-				page_header(mmu->root_hpa));
-
-			return true;
-		}
-	}
+	    mmu->root_level >= PT64_ROOT_4LEVEL)
+		return !mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT) &&
+		       cached_root_available(vcpu, new_cr3, new_role);
 
 	return false;
 }
 
 static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			      union kvm_mmu_page_role new_role,
 			      bool skip_tlb_flush)
 {
-	if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
-		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
-				   KVM_MMU_ROOT_CURRENT);
+	if (!fast_cr3_switch(vcpu, new_cr3, new_role)) {
+		kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
+		return;
+	}
+
+	/*
+	 * It's possible that the cached previous root page is obsolete because
+	 * of a change in the MMU generation number. However, changing the
+	 * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
+	 * free the root set here and allocate a new one.
+	 */
+	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
+
+	if (!skip_tlb_flush) {
+		kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+	}
+
+	/*
+	 * The last MMIO access's GVA and GPA are cached in the VCPU. When
+	 * switching to a new CR3, that GVA->GPA mapping may no longer be
+	 * valid. So clear any cached MMIO info even when we don't need to sync
+	 * the shadow page tables.
+	 */
+	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+
+	__clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
 }
 
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)