Commit 07da1ffa authored by Marc Zyngier
Browse files

KVM: arm64: Remove host_cpu_context member from vcpu structure



For a very long time, we have kept this pointer back to the per-cpu
host state, despite having had working per-cpu accessors at EL2
for some time now.

Recent investigations have shown that this pointer is easy
to abuse in preemptible context, which is a sure sign that
it would better be gone. Not to mention that a per-cpu
pointer is faster to access at all times.

Reported-by: Andrew Scull <ascull@google.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent b990d37f
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	/* Pointer to host CPU context */
	struct kvm_cpu_context *host_cpu_context;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

+0 −3
Original line number Diff line number Diff line
@@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	int *last_ran;
	kvm_host_data_t *cpu_data;

	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
	cpu_data = this_cpu_ptr(&kvm_host_data);

	/*
	 * We might get preempted before the vCPU actually runs, but
@@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	}

	vcpu->cpu = cpu;
	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;

	kvm_vgic_load(vcpu);
	kvm_timer_vcpu_load(vcpu);
+2 −2
Original line number Diff line number Diff line
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
		return;

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	guest_ctxt = &vcpu->arch.ctxt;
	host_dbg = &vcpu->arch.host_debug_state.regs;
	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
		return;

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	guest_ctxt = &vcpu->arch.ctxt;
	host_dbg = &vcpu->arch.host_debug_state.regs;
	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
+3 −3
Original line number Diff line number Diff line
@@ -532,7 +532,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
		return false;

	ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	__ptrauth_save_key(ctxt->sys_regs, APIA);
	__ptrauth_save_key(ctxt->sys_regs, APIB);
	__ptrauth_save_key(ctxt->sys_regs, APDA);
@@ -703,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

@@ -808,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

+4 −2
Original line number Diff line number Diff line
@@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
 */
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	struct kvm_cpu_context *host_ctxt;

	if (!has_vhe())
		return;

	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	__sysreg_save_user_state(host_ctxt);

	/*
@@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
 */
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	struct kvm_cpu_context *host_ctxt;

	if (!has_vhe())
		return;

	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	deactivate_traps_vhe_put();

	__sysreg_save_el1_state(guest_ctxt);
Loading