Commit d400c5b2 authored by David Brazdil, committed by Marc Zyngier
Browse files

KVM: arm64: Split hyp/debug-sr.c to VHE/nVHE



debug-sr.c contains KVM's code for context-switching debug registers, with some
code shared between VHE/nVHE. These common routines are moved to a header file,
VHE-specific code is moved to vhe/debug-sr.c and nVHE-specific code to
nvhe/debug-sr.c.

Functions are slightly refactored to move code hidden behind `has_vhe()` checks
to the corresponding .c files.

Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200625131420.71444-11-dbrazdil@google.com
parent 09cf57eb
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -66,11 +66,6 @@ __efistub__ctype = _ctype;
/* Symbols defined in aarch32.c (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(kvm_skip_instr32);

/* Symbols defined in debug-sr.c (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(__debug_switch_to_guest);
KVM_NVHE_ALIAS(__debug_switch_to_host);
KVM_NVHE_ALIAS(__kvm_get_mdcr_el2);

/* Symbols defined in entry.S (not yet compiled with nVHE build rules). */
KVM_NVHE_ALIAS(__guest_enter);
KVM_NVHE_ALIAS(__guest_exit);
+1 −1
Original line number Diff line number Diff line
@@ -14,7 +14,7 @@ obj-$(CONFIG_KVM) += hyp.o vhe/ nvhe/
obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o

hyp-y := vgic-v3-sr.o timer-sr.o aarch32.o vgic-v2-cpuif-proxy.o sysreg-sr.o \
	 debug-sr.o entry.o fpsimd.o
	 entry.o fpsimd.o

# KVM code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
+12 −66
Original line number Diff line number Diff line
@@ -4,6 +4,9 @@
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_DEBUG_SR_H__
#define __ARM64_KVM_HYP_DEBUG_SR_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>

@@ -85,51 +88,7 @@
	default:	write_debug(ptr[0], reg, 0);			\
	}

static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
{
	u64 reg;

	/* Clear pmscr in case of early return */
	*pmscr_el1 = 0;

	/* SPE present on this CPU? */
	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
						  ID_AA64DFR0_PMSVER_SHIFT))
		return;

	/* Yes; is it owned by EL3? */
	reg = read_sysreg_s(SYS_PMBIDR_EL1);
	if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
		return;

	/* No; is the host actually using the thing? */
	reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
	if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
		return;

	/* Yes; save the control register and disable data generation */
	*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
	write_sysreg_s(0, SYS_PMSCR_EL1);
	isb();

	/* Now drain all buffered data to memory */
	psb_csync();
	dsb(nsh);
}

static void __hyp_text __debug_restore_spe_nvhe(u64 pmscr_el1)
{
	if (!pmscr_el1)
		return;

	/* The host page table is installed, but not yet synchronised */
	isb();

	/* Re-enable data generation */
	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}

static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
						 struct kvm_guest_debug_arch *dbg,
						 struct kvm_cpu_context *ctxt)
{
@@ -148,7 +107,7 @@ static void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
	ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
}

static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
						    struct kvm_guest_debug_arch *dbg,
						    struct kvm_cpu_context *ctxt)
{
@@ -168,20 +127,13 @@ static void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
	write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
}

void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_guest_debug_arch *host_dbg;
	struct kvm_guest_debug_arch *guest_dbg;

	/*
	 * Non-VHE: Disable and flush SPE data generation
	 * VHE: The vcpu can run, but it can't hide.
	 */
	if (!has_vhe())
		__debug_save_spe_nvhe(&vcpu->arch.host_debug_state.pmscr_el1);

	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
		return;

@@ -194,16 +146,13 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
	__debug_restore_state(vcpu, guest_dbg, guest_ctxt);
}

void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
static inline void __hyp_text __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_guest_debug_arch *host_dbg;
	struct kvm_guest_debug_arch *guest_dbg;

	if (!has_vhe())
		__debug_restore_spe_nvhe(vcpu->arch.host_debug_state.pmscr_el1);

	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
		return;

@@ -218,7 +167,4 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
	vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
}

u32 __hyp_text __kvm_get_mdcr_el2(void)
{
	return read_sysreg(mdcr_el2);
}
#endif /* __ARM64_KVM_HYP_DEBUG_SR_H__ */
+1 −1
Original line number Diff line number Diff line
@@ -6,7 +6,7 @@
asflags-y := -D__KVM_NVHE_HYPERVISOR__
ccflags-y := -D__KVM_NVHE_HYPERVISOR__

obj-y := switch.o tlb.o hyp-init.o ../hyp-entry.o
obj-y := debug-sr.o switch.o tlb.o hyp-init.o ../hyp-entry.o

obj-y := $(patsubst %.o,%.hyp.o,$(obj-y))
extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))
+77 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/debug-sr.h>

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

/*
 * Save the host's Statistical Profiling Extension (SPE) state and stop
 * profile data generation before entering the guest (nVHE only).
 *
 * @pmscr_el1: out-parameter receiving the saved PMSCR_EL1 value; left 0
 *             if SPE is absent, owned by EL3, or not enabled by the host,
 *             so that __debug_restore_spe() becomes a no-op.
 */
static void __hyp_text __debug_save_spe(u64 *pmscr_el1)
{
	u64 reg;

	/* Clear pmscr in case of early return */
	*pmscr_el1 = 0;

	/* SPE present on this CPU? */
	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
						  ID_AA64DFR0_PMSVER_SHIFT))
		return;

	/* Yes; is it owned by EL3? */
	reg = read_sysreg_s(SYS_PMBIDR_EL1);
	if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
		return;

	/* No; is the host actually using the thing? */
	reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
	if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
		return;

	/* Yes; save the control register and disable data generation */
	*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
	write_sysreg_s(0, SYS_PMSCR_EL1);
	isb();	/* ensure the PMSCR_EL1 write takes effect before draining */

	/* Now drain all buffered data to memory */
	psb_csync();
	dsb(nsh);
}

/*
 * Restore the host's SPE state on guest exit (nVHE only).
 *
 * @pmscr_el1: the PMSCR_EL1 value saved by __debug_save_spe(); a zero
 *             value means profiling was not active, so nothing is done.
 */
static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
{
	if (!pmscr_el1)
		return;

	/* The host page table is installed, but not yet synchronised */
	isb();

	/* Re-enable data generation */
	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}

/*
 * nVHE entry point for switching debug state to the guest: quiesce host
 * SPE first, then perform the VHE/nVHE-common debug register switch
 * (defined in hyp/debug-sr.h).
 */
void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
{
	/* Disable and flush SPE data generation */
	__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
	__debug_switch_to_guest_common(vcpu);
}

/*
 * nVHE entry point for switching debug state back to the host: re-enable
 * host SPE profiling, then perform the VHE/nVHE-common debug register
 * switch (defined in hyp/debug-sr.h).
 */
void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
	__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
	__debug_switch_to_host_common(vcpu);
}

/* Return the current value of MDCR_EL2, the EL2 debug configuration register. */
u32 __hyp_text __kvm_get_mdcr_el2(void)
{
	return read_sysreg(mdcr_el2);
}
Loading