Commit 8e01d9a3 authored by Marc Zyngier's avatar Marc Zyngier
Browse files

KVM: arm64: vgic-v4: Move the GICv4 residency flow to be driven by vcpu_load/put



When the VHE code was reworked, a lot of the vgic stuff was moved around,
but the GICv4 residency code did stay untouched, meaning that we come
in and out of residency on each flush/sync, which is obviously suboptimal.

To address this, let's move things around a bit:

- Residency entry (flush) moves to vcpu_load
- Residency exit (sync) moves to vcpu_put
- On blocking (entry to WFI), we "put"
- On unblocking (exit from WFI), we "load"

Because these can nest (load/block/put/load/unblock/put, for example),
we now have per-VPE tracking of the residency state.

Additionally, vgic_v4_put gains a "need doorbell" parameter, which only
gets set to true when blocking because of a WFI. This allows a finer
control of the doorbell, which now also gets disabled as soon as
it gets signaled.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20191027144234.8395-2-maz@kernel.org
parent 5c401308
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -141,12 +141,17 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
/*
 * Schedule (on == true) or deschedule (on == false) a VPE on this CPU.
 *
 * Must be called with preemption disabled: the command targets the
 * redistributor of the current CPU.
 *
 * Returns 0 on success, or a negative error code from the underlying
 * ITS command. On success, the VPE's residency state is recorded in
 * vpe->resident so that nested load/put callers can track it.
 */
int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
	struct its_cmd_info info;
	int ret;

	WARN_ON(preemptible());

	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;

	ret = its_send_vpe_cmd(vpe, &info);
	/* Only update the residency tracking if the command succeeded */
	if (!ret)
		vpe->resident = on;

	return ret;
}

int its_invall_vpe(struct its_vpe *vpe)
+2 −2
Original line number Diff line number Diff line
@@ -396,7 +396,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
				 struct kvm_kernel_irq_routing_entry *irq_entry);

void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
int vgic_v4_load(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);

#endif /* __KVM_ARM_VGIC_H */
+2 −0
Original line number Diff line number Diff line
@@ -35,6 +35,8 @@ struct its_vpe {
	/* Doorbell interrupt */
	int			irq;
	irq_hw_number_t		vpe_db_lpi;
	/* VPE resident */
	bool			resident;
	/* VPE proxy mapping */
	int			vpe_proxy_event;
	/*
+8 −4
Original line number Diff line number Diff line
@@ -322,20 +322,24 @@ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
	/*
	 * If we're about to block (most likely because we've just hit a
	 * WFI), we need to sync back the state of the GIC CPU interface
	 * so that we have the latest PMR and group enables. This ensures
	 * that kvm_arch_vcpu_runnable has up-to-date data to decide
	 * whether we have pending interrupts.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	kvm_vgic_vmcr_sync(vcpu);
	vgic_v4_put(vcpu, true);
	preempt_enable();

	kvm_vgic_v4_enable_doorbell(vcpu);
}

/*
 * Called when the vcpu comes out of a blocked state (e.g. exit from WFI).
 *
 * Re-establish GICv4 residency for this vcpu. vgic_v4_load() makes the
 * VPE resident again and disables the doorbell, so the old explicit
 * kvm_vgic_v4_disable_doorbell() call is no longer needed. Preemption
 * must be disabled around the load, as residency is a per-CPU property.
 */
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	vgic_v4_load(vcpu);
	preempt_enable();
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+4 −0
Original line number Diff line number Diff line
@@ -664,6 +664,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)

	if (has_vhe())
		__vgic_v3_activate_traps(vcpu);

	WARN_ON(vgic_v4_load(vcpu));
}

void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
@@ -676,6 +678,8 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
Loading