Commit 41a23ab3 authored by Vitaly Kuznetsov, committed by Paolo Bonzini
Browse files

KVM: selftests: do not substitute SVM/VMX check with KVM_CAP_NESTED_STATE check



state_test/smm_test use KVM_CAP_NESTED_STATE check as an indicator for
nested VMX/SVM presence and this is incorrect. Check for the required
features directly.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200610135847.754289-2-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 77f81f37
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -33,6 +33,7 @@ struct svm_test_data {
struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
bool nested_svm_supported(void);
void nested_svm_check_supported(void);

static inline bool cpu_has_svm(void)
+1 −0
Original line number Diff line number Diff line
@@ -603,6 +603,7 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
bool load_vmcs(struct vmx_pages *vmx);

bool nested_vmx_supported(void);
void nested_vmx_check_supported(void);

void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+7 −3
Original line number Diff line number Diff line
@@ -148,14 +148,18 @@ void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
		: "r15", "memory");
}

void nested_svm_check_supported(void)
bool nested_svm_supported(void)
{
	struct kvm_cpuid_entry2 *entry =
		kvm_get_supported_cpuid_entry(0x80000001);

	if (!(entry->ecx & CPUID_SVM)) {
	return entry->ecx & CPUID_SVM;
}

void nested_svm_check_supported(void)
{
	if (!nested_svm_supported()) {
		print_skip("nested SVM not enabled");
		exit(KSFT_SKIP);
	}
}
+7 −2
Original line number Diff line number Diff line
@@ -379,11 +379,16 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
	init_vmcs_guest_state(guest_rip, guest_rsp);
}

void nested_vmx_check_supported(void)
bool nested_vmx_supported(void)
{
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	if (!(entry->ecx & CPUID_VMX)) {
	return entry->ecx & CPUID_VMX;
}

void nested_vmx_check_supported(void)
{
	if (!nested_vmx_supported()) {
		print_skip("nested VMX not enabled");
		exit(KSFT_SKIP);
	}
+7 −6
Original line number Diff line number Diff line
@@ -118,16 +118,17 @@ int main(int argc, char *argv[])
	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		if (kvm_get_supported_cpuid_entry(0x80000001)->ecx & CPUID_SVM)
		if (nested_svm_supported())
			vcpu_alloc_svm(vm, &nested_gva);
		else
		else if (nested_vmx_supported())
			vcpu_alloc_vmx(vm, &nested_gva);
		vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
	} else {
		pr_info("will skip SMM test with VMX enabled\n");
		vcpu_args_set(vm, VCPU_ID, 1, 0);
	}

	if (!nested_gva)
		pr_info("will skip SMM test with VMX enabled\n");

	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
Loading