Commit 408e9a31 authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

KVM: CPUID: add support for supervisor states



Current CPUID 0xd enumeration code does not support supervisor
states, because KVM only supports setting IA32_XSS to zero.
Change it instead to use a new variable supported_xss, to be
set from the hardware_setup callback which is in charge of CPU
capabilities.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 25703874
Loading
Loading
Loading
Loading
+18 −12
Original line number Diff line number Diff line
@@ -642,15 +642,22 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)

 		cpuid_entry_override(entry, CPUID_D_1_EAX);
 		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
-			entry->ebx = xstate_required_size(supported_xcr0, true);
-		else
+			entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
+							  true);
+		else {
+			WARN_ON_ONCE(supported_xss != 0);
 			entry->ebx = 0;
-		/* Saving XSS controlled state via XSAVES isn't supported. */
-		entry->ecx = 0;
-		entry->edx = 0;
+		}
+		entry->ecx &= supported_xss;
+		entry->edx &= supported_xss >> 32;

 		for (i = 2; i < 64; ++i) {
-			if (!(supported_xcr0 & BIT_ULL(i)))
+			bool s_state;
+			if (supported_xcr0 & BIT_ULL(i))
+				s_state = false;
+			else if (supported_xss & BIT_ULL(i))
+				s_state = true;
+			else
 				continue;

			entry = do_host_cpuid(array, function, i);
@@ -659,17 +666,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)

 			/*
 			 * The supported check above should have filtered out
-			 * invalid sub-leafs as well as sub-leafs managed by
-			 * IA32_XSS MSR.  Only XCR0-managed sub-leafs should
+			 * invalid sub-leafs.  Only valid sub-leafs should
 			 * reach this point, and they should have a non-zero
-			 * save state size.
+			 * save state size.  Furthermore, check whether the
+			 * processor agrees with supported_xcr0/supported_xss
+			 * on whether this is an XCR0- or IA32_XSS-managed area.
 			 */
-			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 1))) {
+			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
 				--array->nent;
 				continue;
 			}

			entry->ecx = 0;
			entry->edx = 0;
		}
		break;
+2 −0
Original line number Diff line number Diff line
@@ -1371,6 +1371,8 @@ static __init void svm_set_cpu_caps(void)
 {
 	kvm_set_cpu_caps();
 
+	supported_xss = 0;
+
 	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
 	if (nested) {
 		kvm_cpu_cap_set(X86_FEATURE_SVM);
+1 −0
Original line number Diff line number Diff line
@@ -7126,6 +7126,7 @@ static __init void vmx_set_cpu_caps(void)
 		kvm_cpu_cap_set(X86_FEATURE_UMIP);
 
 	/* CPUID 0xD.1 */
+	supported_xss = 0;
 	if (!vmx_xsaves_supported())
 		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);

+9 −4
Original line number Diff line number Diff line
@@ -190,6 +190,8 @@ u64 __read_mostly host_efer;
 EXPORT_SYMBOL_GPL(host_efer);
 
 static u64 __read_mostly host_xss;
+u64 __read_mostly supported_xss;
+EXPORT_SYMBOL_GPL(supported_xss);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -2827,7 +2829,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
 		 * XSAVES/XRSTORS to save/restore PT MSRs.
 		 */
-		if (data != 0)
+		if (data & ~supported_xss)
 			return 1;
 		vcpu->arch.ia32_xss = data;
 		break;
@@ -9617,10 +9619,16 @@ int kvm_arch_hardware_setup(void)
 
 	rdmsrl_safe(MSR_EFER, &host_efer);
 
+	if (boot_cpu_has(X86_FEATURE_XSAVES))
+		rdmsrl(MSR_IA32_XSS, host_xss);
+
 	r = kvm_x86_ops->hardware_setup();
 	if (r != 0)
 		return r;
 
+	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
+		supported_xss = 0;
+
 	cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
 
 	if (kvm_has_tsc_control) {
@@ -9637,9 +9645,6 @@ int kvm_arch_hardware_setup(void)
 		kvm_default_tsc_scaling_ratio = 1ULL << kvm_tsc_scaling_ratio_frac_bits;
 	}
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		rdmsrl(MSR_IA32_XSS, host_xss);
-
 	kvm_init_msr_list();
 	return 0;
 }
+1 −0
Original line number Diff line number Diff line
@@ -272,6 +272,7 @@ enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vc
 
 extern u64 host_xcr0;
 extern u64 supported_xcr0;
+extern u64 supported_xss;

static inline bool kvm_mpx_supported(void)
{