Commit 689f3bf2 authored by Paolo Bonzini

KVM: x86: unify callbacks to load paging root



Similar to what kvm-intel.ko is doing, provide a single callback that
merges svm_set_cr3, set_tdp_cr3 and nested_svm_set_tdp_cr3.

This lets us unify the set_cr3 and set_tdp_cr3 entries in kvm_x86_ops.
I'm doing that in this same patch because splitting it adds quite a bit
of churn due to the need for forward declarations.  For the same reason
the assignment to vcpu->arch.mmu->set_cr3 is moved to kvm_init_shadow_mmu
from init_kvm_softmmu and nested_svm_init_mmu_context.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f91af517
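
Before the diff itself, a minimal standalone sketch of the shape this patch moves to: one vendor callback that receives the new paging root and decides internally whether it is an NPT root or a guest CR3, invoked from a single place in common MMU code. This is plain, compilable C, not kernel code; every name in it (struct toy_vcpu, toy_set_root, toy_load_pgd) is an illustrative stand-in, not a real KVM symbol.

/* Toy model of the unified "load paging root" callback; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	bool npt_enabled;              /* nested paging in use */
	unsigned long guest_cr3;       /* last known guest CR3 */
	unsigned long vmcb_nested_cr3; /* stand-in for vmcb->control.nested_cr3 */
	unsigned long vmcb_save_cr3;   /* stand-in for vmcb->save.cr3 */
};

/* One callback for both modes, replacing the set_cr3/set_tdp_cr3 pair. */
static void toy_set_root(struct toy_vcpu *v, unsigned long root)
{
	if (v->npt_enabled) {
		v->vmcb_nested_cr3 = root;         /* root is the NPT table */
		v->vmcb_save_cr3   = v->guest_cr3; /* keep guest CR3 in sync */
	} else {
		v->vmcb_save_cr3   = root;         /* root is the shadow CR3 */
	}
}

/* Common MMU code now has exactly one call site, whatever the mode. */
static void toy_load_pgd(struct toy_vcpu *v, unsigned long root_hpa)
{
	toy_set_root(v, root_hpa);
}

int main(void)
{
	struct toy_vcpu v = { .npt_enabled = true, .guest_cr3 = 0x1000 };

	toy_load_pgd(&v, 0xabc000);
	printf("nested_cr3=%#lx save.cr3=%#lx\n",
	       v.vmcb_nested_cr3, v.vmcb_save_cr3);
	return 0;
}

The real version of this lands in svm_set_cr3 in the diff below, with extra care about when the guest CR3 in the VMCB actually needs to be rewritten.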
+0 −3
@@ -387,7 +387,6 @@ struct kvm_mmu_root_info {
  * current mmu mode.
  */
 struct kvm_mmu {
-	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
 	u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
@@ -1156,8 +1155,6 @@ struct kvm_x86_ops {
 	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 
-	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
-
 	bool (*has_wbinvd_exit)(void);
 
 	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
+3 −3
@@ -95,10 +95,10 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
 	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
 }
 
-static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 {
 	if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
-		vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
-					kvm_get_active_pcid(vcpu));
+		kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
+				     kvm_get_active_pcid(vcpu));
 }

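A side note on the root_hpa | kvm_get_active_pcid(vcpu) expression above: the root is page-aligned and a PCID is a 12-bit value occupying exactly the low bits of CR3, so the OR composes a well-formed CR3 value with no masking needed. A throwaway demonstration in plain C (nothing KVM-specific, values chosen arbitrarily):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long root_hpa = 0x12345000UL; /* page-aligned root, low 12 bits clear */
	unsigned long pcid     = 0x00cUL;      /* PCIDs are 12-bit values (< 4096) */

	assert((root_hpa & 0xfffUL) == 0);       /* alignment is what makes the OR safe */
	printf("cr3 = %#lx\n", root_hpa | pcid); /* -> 0x1234500c */
	return 0;
}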
+0 −2
@@ -4932,7 +4932,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->update_pte = nonpaging_update_pte;
 	context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
 	context->direct_map = true;
-	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
@@ -5079,7 +5078,6 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 	struct kvm_mmu *context = vcpu->arch.mmu;
 
 	kvm_init_shadow_mmu(vcpu);
-	context->set_cr3           = kvm_x86_ops->set_cr3;
 	context->get_guest_pgd     = get_cr3;
 	context->get_pdptr         = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
+18 −24
@@ -2989,15 +2989,6 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
 	return pdpte;
 }
 
-static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
-				   unsigned long root)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.nested_cr3 = __sme_set(root);
-	mark_dirty(svm->vmcb, VMCB_NPT);
-}
-
 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
 				       struct x86_exception *fault)
 {
@@ -3033,7 +3024,6 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
 	kvm_init_shadow_mmu(vcpu);
-	vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
 	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
 	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
@@ -5955,22 +5945,28 @@ STACK_FRAME_NON_STANDARD(svm_vcpu_run);
 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+	bool update_guest_cr3 = true;
+	unsigned long cr3;
 
-	svm->vmcb->save.cr3 = __sme_set(root);
-	mark_dirty(svm->vmcb, VMCB_CR);
-}
+	cr3 = __sme_set(root);
+	if (npt_enabled) {
+		svm->vmcb->control.nested_cr3 = cr3;
+		mark_dirty(svm->vmcb, VMCB_NPT);
 
-static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.nested_cr3 = __sme_set(root);
-	mark_dirty(svm->vmcb, VMCB_NPT);
+		/* Loading L2's CR3 is handled by enter_svm_guest_mode.  */
+		if (is_guest_mode(vcpu))
+			update_guest_cr3 = false;
+		else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+			cr3 = vcpu->arch.cr3;
+		else /* CR3 is already up-to-date.  */
+			update_guest_cr3 = false;
+	}
 
-	/* Also sync guest cr3 here in case we live migrate */
-	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
-	mark_dirty(svm->vmcb, VMCB_CR);
+	if (update_guest_cr3) {
+		svm->vmcb->save.cr3 = cr3;
+		mark_dirty(svm->vmcb, VMCB_CR);
+	}
 }
 
 static int is_disabled(void)
 {
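
To spell out the three NPT cases in the new svm_set_cr3 above, here is a standalone paraphrase of that branch structure, again in plain C with stand-in state (struct toy_svm and its fields are illustrative, not kernel symbols), not the kernel code itself:

#include <stdbool.h>

struct toy_svm {
	bool npt_enabled;      /* nested paging in use */
	bool in_guest_mode;    /* an L2 guest is currently running */
	bool cr3_cached;       /* vcpu->arch.cr3 holds the current guest CR3 */
	unsigned long cached_cr3;
	unsigned long vmcb_nested_cr3;
	unsigned long vmcb_save_cr3;
};

static void toy_svm_set_root(struct toy_svm *s, unsigned long root)
{
	bool update_guest_cr3 = true;
	unsigned long cr3 = root;

	if (s->npt_enabled) {
		s->vmcb_nested_cr3 = cr3;         /* the root always lands in the NPT pointer */

		if (s->in_guest_mode)
			update_guest_cr3 = false; /* L2's CR3 is written by the nested-entry path */
		else if (s->cr3_cached)
			cr3 = s->cached_cr3;      /* sync the cached guest CR3 into the VMCB */
		else
			update_guest_cr3 = false; /* VMCB already holds the current guest CR3 */
	}

	if (update_guest_cr3)
		s->vmcb_save_cr3 = cr3;           /* shadow-paging root, or guest CR3 sync */
}

int main(void)
{
	struct toy_svm s = { .npt_enabled = true, .in_guest_mode = false,
			     .cr3_cached = true, .cached_cr3 = 0x2000 };

	toy_svm_set_root(&s, 0xabc000);
	return s.vmcb_save_cr3 == 0x2000 ? 0 : 1; /* guest CR3 got synced */
}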
@@ -7418,8 +7414,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.read_l1_tsc_offset = svm_read_l1_tsc_offset,
 	.write_l1_tsc_offset = svm_write_l1_tsc_offset,
 
-	.set_tdp_cr3 = set_tdp_cr3,
-
 	.check_intercept = svm_check_intercept,
 	.handle_exit_irqoff = svm_handle_exit_irqoff,

+0 −1
@@ -354,7 +354,6 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 			VMX_EPT_EXECUTE_ONLY_BIT,
 			nested_ept_ad_enabled(vcpu),
 			nested_ept_get_eptp(vcpu));
-	vcpu->arch.mmu->set_cr3           = vmx_set_cr3;
 	vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
 	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
 	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;