Commit 850448f3 authored by Peter Shier, committed by Paolo Bonzini

KVM: nVMX: Fix VMX preemption timer migration

Add a new field, appended to struct kvm_vmx_nested_state_hdr, that holds
the preemption timer's expiration deadline. This prevents the first
VM-Enter after migration from incorrectly restarting the timer with the
full timer value instead of the partially decayed value. With this change,
KVM_SET_NESTED_STATE restarts the timer using the migrated state
regardless of whether L1 sets VM_EXIT_SAVE_VMX_PREEMPTION_TIMER.
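
For illustration, a minimal sketch of how a userspace VMM might carry the
new header field across a migration; migrate_nested_state and the 8 KiB
data area are hypothetical, and error handling is trimmed:

  #include <linux/kvm.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>

  /* Hypothetical helper: ferry nested VMX state from a source vCPU fd
   * to a destination vCPU fd via KVM_GET/SET_NESTED_STATE. */
  static int migrate_nested_state(int src_vcpu_fd, int dst_vcpu_fd)
  {
  	struct {
  		struct kvm_nested_state state;
  		char data[8192];	/* assumed upper bound on state size */
  	} buf;

  	memset(&buf, 0, sizeof(buf));
  	buf.state.size = sizeof(buf);
  	if (ioctl(src_vcpu_fd, KVM_GET_NESTED_STATE, &buf.state) < 0)
  		return -1;

  	/* With this patch, an in-flight preemption timer deadline rides
  	 * in the header; the new flag marks the field as valid. */
  	if (buf.state.hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
  		fprintf(stderr, "preemption timer deadline: %llu\n",
  			(unsigned long long)
  			buf.state.hdr.vmx.preemption_timer_deadline);

  	/* The destination re-arms the timer from the migrated deadline
  	 * rather than from the full vmcs12 timer value. */
  	return ioctl(dst_vcpu_fd, KVM_SET_NESTED_STATE, &buf.state);
  }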

Fixes: cf8b84f4 ("kvm: nVMX: Prepare for checkpointing L2 state")

Signed-off-by: Peter Shier <pshier@google.com>
Signed-off-by: Makarand Sonare <makarandsonare@google.com>
Message-Id: <20200526215107.205814-2-makarandsonare@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f7d31e65
Documentation/virt/kvm/api.rst  +4 −0
@@ -4334,9 +4334,13 @@ Errors:
   #define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE	0x00000001
   #define KVM_STATE_NESTED_VMX_SMM_VMXON	0x00000002
 
+#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
+
   struct kvm_vmx_nested_state_hdr {
+	__u32 flags;
 	__u64 vmxon_pa;
 	__u64 vmcs12_pa;
+	__u64 preemption_timer_deadline;
 
 	struct {
 		__u16 flags;
arch/x86/include/uapi/asm/kvm.h  +3 −0
@@ -400,6 +400,7 @@ struct kvm_sync_regs {
 
 #define KVM_STATE_NESTED_SVM_VMCB_SIZE	0x1000
 
+#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE	0x00000001
 
 struct kvm_vmx_nested_state_data {
 	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
@@ -407,8 +408,10 @@ struct kvm_vmx_nested_state_data {
 };
 
 struct kvm_vmx_nested_state_hdr {
+	__u32 flags;
 	__u64 vmxon_pa;
 	__u64 vmcs12_pa;
+	__u64 preemption_timer_deadline;
 
 	struct {
 		__u16 flags;
arch/x86/kvm/vmx/nested.c  +47 −8
@@ -2087,9 +2087,29 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
+static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	u64 timer_value = 0;
+
+	u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
+			    VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
+
+	if (!vmx->nested.has_preemption_timer_deadline) {
+		timer_value = vmcs12->vmx_preemption_timer_value;
+		vmx->nested.preemption_timer_deadline = timer_value +
+							l1_scaled_tsc;
+		vmx->nested.has_preemption_timer_deadline = true;
+	} else if (l1_scaled_tsc < vmx->nested.preemption_timer_deadline)
+		timer_value = vmx->nested.preemption_timer_deadline -
+			      l1_scaled_tsc;
+	return timer_value;
+}
+
+static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
+					u64 preemption_timeout)
 {
-	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
@@ -3348,8 +3368,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
 	 * the timer.
 	 */
 	vmx->nested.preemption_timer_expired = false;
-	if (nested_cpu_has_preemption_timer(vmcs12))
-		vmx_start_preemption_timer(vcpu);
+	if (nested_cpu_has_preemption_timer(vmcs12)) {
+		u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
+		vmx_start_preemption_timer(vcpu, timer_value);
+	}
 
 	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -3457,6 +3479,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * the nested entry.
 	 */
 	vmx->nested.nested_run_pending = 1;
+	vmx->nested.has_preemption_timer_deadline = false;
 	status = nested_vmx_enter_non_root_mode(vcpu, true);
 	if (unlikely(status != NVMX_VMENTRY_SUCCESS))
 		goto vmentry_failed;
@@ -3957,7 +3980,8 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
 
 	if (nested_cpu_has_preemption_timer(vmcs12) &&
-	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
+	    !vmx->nested.nested_run_pending)
 		vmcs12->vmx_preemption_timer_value =
 			vmx_get_preemption_timer_value(vcpu);
 
@@ -5891,8 +5915,10 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 		.flags = 0,
 		.format = KVM_STATE_NESTED_FORMAT_VMX,
 		.size = sizeof(kvm_state),
+		.hdr.vmx.flags = 0,
 		.hdr.vmx.vmxon_pa = -1ull,
 		.hdr.vmx.vmcs12_pa = -1ull,
+		.hdr.vmx.preemption_timer_deadline = 0,
 	};
 	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
 		&user_kvm_nested_state->data.vmx[0];
@@ -5934,6 +5960,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 
 			if (vmx->nested.mtf_pending)
 				kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
+
+			if (nested_cpu_has_preemption_timer(vmcs12) &&
+			    vmx->nested.has_preemption_timer_deadline) {
+				kvm_state.hdr.vmx.flags |=
+					KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
+				kvm_state.hdr.vmx.preemption_timer_deadline =
+					vmx->nested.preemption_timer_deadline;
+			}
 		}
 	}
 
@@ -5979,7 +6013,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
 			return -EFAULT;
 	}
-
 out:
 	return kvm_state.size;
 }
@@ -6141,6 +6174,12 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 			goto error_guest_mode;
 	}
 
+	if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
+		vmx->nested.has_preemption_timer_deadline = true;
+		vmx->nested.preemption_timer_deadline =
+			kvm_state->hdr.vmx.preemption_timer_deadline;
+	}
+
 	if (nested_vmx_check_controls(vcpu, vmcs12) ||
 	    nested_vmx_check_host_state(vcpu, vmcs12) ||
 	    nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
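
To make the decay arithmetic concrete, here is a standalone model of what
vmx_calc_preemption_timer_value() above computes; the names are
hypothetical stand-ins (in KVM the state lives in struct nested_vmx, the
scaled TSC comes from kvm_read_l1_tsc(vcpu, rdtsc()), and the shift count
is VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE, which is 5):

  #include <stdbool.h>
  #include <stdint.h>

  #define EMULATED_PREEMPTION_TIMER_RATE 5

  struct timer_state {
  	uint64_t deadline;	/* expiry point, in scaled L1 TSC units */
  	bool has_deadline;	/* false on the first entry after VMLAUNCH/VMRESUME */
  };

  static uint64_t calc_timer_value(struct timer_state *s, uint64_t l1_tsc,
  				 uint64_t vmcs12_timer_value)
  {
  	uint64_t l1_scaled_tsc = l1_tsc >> EMULATED_PREEMPTION_TIMER_RATE;

  	if (!s->has_deadline) {
  		/* First entry: arm the full value and record the deadline. */
  		s->deadline = vmcs12_timer_value + l1_scaled_tsc;
  		s->has_deadline = true;
  		return vmcs12_timer_value;
  	}

  	/* Later entries, including the first one after
  	 * KVM_SET_NESTED_STATE: re-arm only the remaining, partially
  	 * decayed value, or 0 if the deadline has already passed. */
  	return l1_scaled_tsc < s->deadline ? s->deadline - l1_scaled_tsc : 0;
  }

On the migration destination, has_deadline is seeded by the
KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE flag, which is why the timer
resumes from the migrated deadline instead of restarting at the full
vmcs12 value.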
arch/x86/kvm/vmx/vmx.h  +2 −0
@@ -169,6 +169,8 @@ struct nested_vmx {
 	u16 posted_intr_nv;
 
 	struct hrtimer preemption_timer;
+	u64 preemption_timer_deadline;
+	bool has_preemption_timer_deadline;
 	bool preemption_timer_expired;
 
 	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */