Commit 8564d637 authored by Steven Price, committed by Marc Zyngier

KVM: arm64: Support stolen time reporting via shared structure



Implement the service call for configuring a shared structure between a
VCPU and the hypervisor in which the hypervisor can write the time
stolen from the VCPU's execution time by other tasks on the host.
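
The service call itself is handled outside the hunks shown below, in the
SMCCC hypercall dispatcher. As a rough, hypothetical sketch of that case,
assuming the ARM_SMCCC_HV_PV_TIME_ST function ID and the
kvm_hvc_call_handler() switch introduced earlier in this series, where
val defaults to SMCCC_RET_NOT_SUPPORTED:

	case ARM_SMCCC_HV_PV_TIME_ST:
		/* (Re)initialise the record and hand the guest its IPA */
		gpa = kvm_init_stolen_time(vcpu);
		if (gpa != GPA_INVALID)
			val = gpa;
		break;

The guest thus either learns the IPA of the shared structure or sees
SMCCC_RET_NOT_SUPPORTED.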

User space allocates memory which is placed at an IPA also chosen by user
space. The hypervisor then updates the shared structure using
kvm_put_guest() to ensure single-copy atomicity of the 64-bit value
reporting the stolen time in nanoseconds.

Whenever stolen time is enabled by the guest, the stolen time counter is
reset.
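
That reset lives in the non-stub kvm_init_stolen_time(), which is not part
of the hunks below. A minimal sketch, assuming the struct
pvclock_vcpu_stolen_time layout from the PV-time ABI header added earlier
in this series:

	gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
	{
		struct pvclock_vcpu_stolen_time init_values = {};
		struct kvm *kvm = vcpu->kvm;
		u64 base = vcpu->arch.steal.base;
		int idx;

		if (base == GPA_INVALID)
			return base;

		/* Restart the count whenever the guest (re-)enables the feature */
		vcpu->arch.steal.steal = 0;
		vcpu->arch.steal.last_steal = current->sched_info.run_delay;

		/* Zero the shared structure, including the stolen time field */
		idx = srcu_read_lock(&kvm->srcu);
		kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
		srcu_read_unlock(&kvm->srcu, idx);

		return base;
	}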

The stolen time itself is retrieved from the sched_info structure
maintained by the Linux scheduler code. We enable SCHEDSTATS when KVM is
selected in Kconfig to ensure this value is meaningful.
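
The non-stub update path is likewise outside the hunks shown here. A
sketch of how it plausibly fits together, with sched_info.run_delay (the
nanoseconds a task spent runnable but not running) as the stolen-time
source and kvm_put_guest() performing the single 64-bit store:

	void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
	{
		struct kvm *kvm = vcpu->kvm;
		u64 base = vcpu->arch.steal.base;
		u64 steal;
		int idx;

		if (base == GPA_INVALID)
			return;

		/* Fold in the run_delay accumulated since the last update */
		steal = vcpu->arch.steal.steal;
		steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
		vcpu->arch.steal.last_steal = current->sched_info.run_delay;
		vcpu->arch.steal.steal = steal;

		/*
		 * A single 64-bit store via kvm_put_guest() gives the guest
		 * single-copy atomicity when it reads the value back.
		 */
		idx = srcu_read_lock(&kvm->srcu);
		kvm_put_guest(kvm, base + offsetof(struct pvclock_vcpu_stolen_time,
						   stolen_time),
			      cpu_to_le64(steal), u64);
		srcu_read_unlock(&kvm->srcu, idx);
	}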

Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent cac0f1b7
arch/arm/include/asm/kvm_host.h  +19 −0
@@ -39,6 +39,7 @@
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -329,6 +330,24 @@ static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
 	return SMCCC_RET_NOT_SUPPORTED;
 }
 
+static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
+{
+	return GPA_INVALID;
+}
+
+static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+}
+
+static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+	return false;
+}
+
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
arch/arm64/include/asm/kvm_host.h  +20 −0
@@ -44,6 +44,7 @@
 	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
@@ -338,6 +339,13 @@ struct kvm_vcpu_arch {
 	/* True when deferrable sysregs are loaded on the physical CPU,
 	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
 	bool sysregs_loaded_on_cpu;
+
+	/* Guest PV state */
+	struct {
+		u64 steal;
+		u64 last_steal;
+		gpa_t base;
+	} steal;
 };
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -479,6 +487,18 @@ int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
+gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
+void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+
+static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+	vcpu_arch->steal.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+	return (vcpu_arch->steal.base != GPA_INVALID);
+}
+
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
arch/arm64/kvm/Kconfig  +1 −0
@@ -39,6 +39,7 @@ config KVM
 	select IRQ_BYPASS_MANAGER
 	select HAVE_KVM_IRQ_BYPASS
 	select HAVE_KVM_VCPU_RUN_PID_CHANGE
+	select SCHEDSTATS
 	---help---
 	  Support hosting virtualized guest machines.
 	  We don't support KVM with 16K page tables yet, due to the multiple
include/linux/kvm_types.h  +2 −0
@@ -35,6 +35,8 @@ typedef unsigned long gva_t;
 typedef u64            gpa_t;
 typedef u64            gfn_t;
 
+#define GPA_INVALID	(~(gpa_t)0)
+
 typedef unsigned long  hva_t;
 typedef u64            hpa_t;
 typedef u64            hfn_t;
virt/kvm/arm/arm.c  +11 −0
@@ -40,6 +40,10 @@
 #include <asm/kvm_coproc.h>
 #include <asm/sections.h>
 
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_pmu.h>
+#include <kvm/arm_psci.h>
+
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension	virt");
 #endif
@@ -351,6 +355,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	kvm_arm_reset_debug_ptr(vcpu);
 
+	kvm_arm_pvtime_vcpu_init(&vcpu->arch);
+
 	return kvm_vgic_vcpu_init(vcpu);
 }
 
@@ -380,6 +386,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	kvm_vcpu_load_sysregs(vcpu);
 	kvm_arch_vcpu_load_fp(vcpu);
 	kvm_vcpu_pmu_restore_guest(vcpu);
+	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
+		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
 
 	if (single_task_running())
 		vcpu_clear_wfe_traps(vcpu);
@@ -645,6 +653,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 		 * that a VCPU sees new virtual interrupts.
 		 */
 		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
+
+		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
+			kvm_update_stolen_time(vcpu);
 	}
 }
 