Commit b5c3c1b3 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Micro-optimize nEPT's bad memtype/XWR checks

Rework the handling of nEPT's bad memtype/XWR checks to micro-optimize
the checks as much as possible.  Move the check to a separate helper,
__is_bad_mt_xwr(), which allows the guest_rsvd_check usage in
paging_tmpl.h to omit the check entirely for paging32/64 (bad_mt_xwr is
always zero for non-nEPT) while retaining the bitwise-OR of the current
code for the shadow_zero_check in walk_shadow_page_get_mmio_spte().
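
As a minimal standalone sketch of the branch-avoidance idea (the made-up
helpers check_a()/check_b() stand in for the reserved-bits and bad
memtype/XWR tests; this is not the kernel code):

  #include <stdbool.h>
  #include <stdint.h>

  static bool check_a(uint64_t pte) { return pte & 0x000f000000000000ULL; }
  static bool check_b(uint64_t pte) { return pte & 0x3f; }

  static bool scan_logical(const uint64_t *sptes, int n)
  {
          bool reserved = false;

          for (int i = 0; i < n; i++)
                  /* '||' short-circuits, so each check tends to cost a Jcc. */
                  reserved = reserved || check_a(sptes[i]) || check_b(sptes[i]);
          return reserved;
  }

  static bool scan_bitwise(const uint64_t *sptes, int n)
  {
          bool reserved = false;

          for (int i = 0; i < n; i++)
                  /* '|' evaluates both checks and ORs them in branchlessly. */
                  reserved |= check_a(sptes[i]) | check_b(sptes[i]);
          return reserved;
  }

Both loops compute the same final value; only the control flow differs.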

Add a comment for the bitwise-OR usage in the mmio spte walk to avoid
future attempts to "fix" the code, which is what prompted this
optimization in the first place[*].

Opportunistically remove the superfluous '!= 0' and parentheses, and
use BIT_ULL() instead of open coding its equivalent.
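
For reference, a standalone before/after sketch of that cleanup (BIT_ULL()
is open-coded here only to keep the example self-contained; the masks and
names are otherwise arbitrary):

  #include <stdbool.h>
  #include <stdint.h>

  /* Same expansion as the kernel's BIT_ULL() helper. */
  #define BIT_ULL(nr) (1ULL << (nr))

  /* Old style: open-coded shift plus an explicit '!= 0'. */
  static bool bad_mt_xwr_old(uint64_t bad_mt_xwr, uint64_t pte)
  {
          return (bad_mt_xwr & (1ull << (pte & 0x3f))) != 0;
  }

  /*
   * New style: the conversion to bool already yields true for any nonzero
   * value, so the '!= 0' adds nothing.
   */
  static bool bad_mt_xwr_new(uint64_t bad_mt_xwr, uint64_t pte)
  {
          return bad_mt_xwr & BIT_ULL(pte & 0x3f);
  }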

The net effect is that code generation is largely unchanged for
walk_shadow_page_get_mmio_spte(), marginally better for
ept_prefetch_invalid_gpte(), and significantly improved for
paging32/64_prefetch_invalid_gpte().
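
To illustrate where the paging32/64 win comes from, a stripped-down sketch
of the PTTYPE stub trick (all names, values and masks below are stand-ins,
not the kernel code; the real helpers are in the paging_tmpl.h hunk below):

  #include <stdbool.h>
  #include <stdint.h>

  #define PTTYPE      64      /* instantiating the paging64 flavor */
  #define PTTYPE_EPT  18      /* stand-in for the EPT template type */

  static bool rsvd_mask_hit(uint64_t gpte)
  {
          return gpte & 0x000f000000000000ULL;    /* stand-in mask */
  }

  /* Plays the role of FNAME(is_bad_mt_xwr): constant false for non-EPT. */
  static bool is_bad_mt_xwr_stub(uint64_t gpte)
  {
  #if PTTYPE != PTTYPE_EPT
          return false;
  #else
          return gpte & 0x3f;                     /* placeholder lookup */
  #endif
  }

  static bool is_rsvd_bits_set_stub(uint64_t gpte)
  {
          /*
           * With PTTYPE == 64 the second operand is a constant false, so
           * the compiler folds the '||' away and only the reserved-bits
           * test survives.
           */
          return rsvd_mask_hit(gpte) || is_bad_mt_xwr_stub(gpte);
  }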

Note, walk_shadow_page_get_mmio_spte() can't use a templated version of
the memtype/XWR checks as it works on the host's shadow PTEs, e.g. checks
that KVM hasn't borked its EPT tables.  Even if it could be templated, the
benefits of having a single implementation far outweigh the few uops that
would be saved for NPT or non-TDP paging, e.g. most compilers inline it
all the way up to kvm_mmu_page_fault().

[*] https://lkml.kernel.org/r/20200108001859.25254-1-sean.j.christopherson@intel.com

Cc: Jim Mattson <jmattson@google.com>
Cc: David Laight <David.Laight@ACULAB.COM>
Cc: Arvind Sankar <nivedita@alum.mit.edu>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f8052a05
arch/x86/kvm/mmu/mmu.c  +14 −12
@@ -3968,20 +3968,14 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
 static bool
 __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
 {
-	int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;
+	int bit7 = (pte >> 7) & 1;
 
-	return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
-		((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
+	return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
 }
 
-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
+static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
 {
-	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
-}
-
-static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
-{
-	return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
+	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
 }
 
 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
@@ -4005,9 +3999,12 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
+	struct rsvd_bits_validate *rsvd_check;
 	int root, leaf;
 	bool reserved = false;
 
+	rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
+
 	walk_shadow_page_lockless_begin(vcpu);
 
 	for (shadow_walk_init(&iterator, vcpu, addr),
@@ -4022,8 +4019,13 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 		if (!is_shadow_present_pte(spte))
 			break;
 
-		reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
-						    iterator.level);
+		/*
+		 * Use a bitwise-OR instead of a logical-OR to aggregate the
+		 * reserved bit and EPT's invalid memtype/XWR checks to avoid
+		 * adding a Jcc in the loop.
+		 */
+		reserved |= __is_bad_mt_xwr(rsvd_check, spte) |
+			    __is_rsvd_bits_set(rsvd_check, spte, iterator.level);
 	}
 
 	walk_shadow_page_lockless_end(vcpu);
arch/x86/kvm/mmu/paging_tmpl.h  +17 −2
@@ -128,6 +128,21 @@ static inline int FNAME(is_present_gpte)(unsigned long pte)
 #endif
 }
 
+static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
+{
+#if PTTYPE != PTTYPE_EPT
+	return false;
+#else
+	return __is_bad_mt_xwr(rsvd_check, gpte);
+#endif
+}
+
+static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
+	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
+}
+
 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			       pt_element_t __user *ptep_user, unsigned index,
 			       pt_element_t orig_pte, pt_element_t new_pte)
@@ -183,7 +198,7 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 	    !(gpte & PT_GUEST_ACCESSED_MASK))
 		goto no_present;
 
-	if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
 		goto no_present;
 
 	return false;
@@ -400,7 +415,7 @@ retry_walk:
 		if (unlikely(!FNAME(is_present_gpte)(pte)))
 			goto error;
 
-		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
+		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
 			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
 			goto error;
 		}
		}