Commit 8c008659 authored by Paolo Bonzini

KVM: MMU: stop dereferencing vcpu->arch.mmu to get the context for MMU init



kvm_init_shadow_mmu() was actually the only function that could be called
with different vcpu->arch.mmu values.  Now that kvm_init_shadow_npt_mmu()
is separated from kvm_init_shadow_mmu(), we always know the MMU context
we need to use and there is no need to dereference the vcpu->arch.mmu
pointer.

Based on a patch by Vitaly Kuznetsov <vkuznets@redhat.com>.
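
For readers without the KVM tree at hand, here is a minimal stand-alone
sketch of the calling-convention change.  The struct layouts and helper
names below are toy stand-ins, not the real kernel definitions: the old
helper picked up whichever context vcpu->mmu happened to point at, while
the reworked helper takes the context as an explicit parameter, so each
caller can name root_mmu or guest_mmu directly.

/*
 * Illustrative sketch only: simplified stand-ins for the kernel types,
 * not the actual KVM definitions.
 */
#include <stdio.h>

struct mmu {
	const char *name;
	unsigned long role;
};

struct vcpu {
	struct mmu *mmu;	/* points at either root_mmu or guest_mmu */
	struct mmu root_mmu;
	struct mmu guest_mmu;
};

/*
 * Before: the helper silently used whatever vcpu->mmu pointed at, so
 * its effect depended on state set up elsewhere.
 */
static void init_context_old(struct vcpu *vcpu, unsigned long role)
{
	struct mmu *context = vcpu->mmu;

	context->role = role;
}

/*
 * After: the caller names the context explicitly; the helper no longer
 * cares where vcpu->mmu points.
 */
static void init_context_new(struct vcpu *vcpu, struct mmu *context,
			     unsigned long role)
{
	(void)vcpu;	/* kept only for signature parity with the above */
	context->role = role;
}

int main(void)
{
	struct vcpu vcpu = {
		.root_mmu  = { .name = "root_mmu" },
		.guest_mmu = { .name = "guest_mmu" },
	};

	vcpu.mmu = &vcpu.root_mmu;
	init_context_old(&vcpu, 1);	/* implicitly hits root_mmu */

	/* Explicit: mirrors kvm_init_shadow_npt_mmu() using guest_mmu. */
	init_context_new(&vcpu, &vcpu.guest_mmu, 2);

	printf("%s: role=%lu, %s: role=%lu\n",
	       vcpu.root_mmu.name, vcpu.root_mmu.role,
	       vcpu.guest_mmu.name, vcpu.guest_mmu.role);
	return 0;
}

The design point this mirrors: once every caller passes the context it
wants, the helper works regardless of whether vcpu->mmu has been set up
yet, and the compiler enforces that each call site states its intent.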

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200710141157.1640173-3-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0f04a2ac
@@ -4850,7 +4850,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 
 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = vcpu->arch.mmu;
+	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role new_role =
 		kvm_calc_tdp_mmu_root_page_role(vcpu, false);
 
@@ -4918,11 +4918,10 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 	return role;
 }
 
-static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4,
-				    u32 efer, union kvm_mmu_role new_role)
+static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+				    u32 cr0, u32 cr4, u32 efer,
+				    union kvm_mmu_role new_role)
 {
-	struct kvm_mmu *context = vcpu->arch.mmu;
-
 	if (!(cr0 & X86_CR0_PG))
 		nonpaging_init_context(vcpu, context);
 	else if (efer & EFER_LMA)
@@ -4938,23 +4937,23 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4,
 
 static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
 {
-	struct kvm_mmu *context = vcpu->arch.mmu;
+	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role new_role =
 		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
 
 	if (new_role.as_u64 != context->mmu_role.as_u64)
-		shadow_mmu_init_context(vcpu, cr0, cr4, efer, new_role);
+		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
 }
 
 void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
 			     gpa_t nested_cr3)
 {
-	struct kvm_mmu *context = vcpu->arch.mmu;
+	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
 	union kvm_mmu_role new_role =
 		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
 
 	if (new_role.as_u64 != context->mmu_role.as_u64)
-		shadow_mmu_init_context(vcpu, cr0, cr4, efer, new_role);
+		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
 
@@ -4990,7 +4989,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 			     bool accessed_dirty, gpa_t new_eptp)
 {
-	struct kvm_mmu *context = vcpu->arch.mmu;
+	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
 	u8 level = vmx_eptp_page_walk_level(new_eptp);
 	union kvm_mmu_role new_role =
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
@@ -5024,7 +5023,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu *context = vcpu->arch.mmu;
+	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 
 	kvm_init_shadow_mmu(vcpu,
 			    kvm_read_cr0_bits(vcpu, X86_CR0_PG),