Commit 916ccadc authored by Paul Mackerras
Browse files

KVM: PPC: Book3S PR: Fix MSR setting when delivering interrupts



This makes sure that MSR "partial-function" bits are not transferred
to SRR1 when delivering an interrupt.  This was causing failures in
guests running kernels that include commit f3d96e69 ("powerpc/mm:
Overhaul handling of bad page faults", 2017-07-19), which added code
to check bits of SRR1 on instruction storage interrupts (ISIs) that
indicate a bad page fault.  The symptom was that a guest user program
that handled a signal and attempted to return from the signal handler
would get a SIGBUS signal and die.

The code that generated ISIs and some other interrupts would
previously set bits in the guest MSR to indicate the interrupt status
and then call kvmppc_book3s_queue_irqprio().  This technique no
longer works now that kvmppc_inject_interrupt() is masking off those
bits.  Instead we make kvmppc_core_queue_data_storage() and
kvmppc_core_queue_inst_storage() call kvmppc_inject_interrupt()
directly, and make sure that all the places that generate ISIs or
DSIs call kvmppc_core_queue_{data,inst}_storage instead of
kvmppc_book3s_queue_irqprio().

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent b71dc519
Loading
Loading
Loading
Loading
+5 −8
Original line number Original line Diff line number Diff line
@@ -134,7 +134,7 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
	vcpu->arch.mmu.reset_msr(vcpu);
}
}
@@ -256,18 +256,15 @@ void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
{
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);	/* used by kvm_hv */
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);


void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
{
	u64 msr = kvmppc_get_msr(vcpu);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);


static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
					 unsigned int priority)
+17 −25
Original line number Original line Diff line number Diff line
@@ -728,24 +728,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		pte.may_execute = !data;
		pte.may_execute = !data;
	}
	}


	if (page_found == -ENOENT) {
	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Page not found in guest PTE entries */
		/* Page not found in guest PTE entries, or protection fault */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 flags;
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		if (page_found == -EPERM)
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
			flags = DSISR_PROTFAULT;
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		else
		kvmppc_book3s_queue_irqprio(vcpu, vec);
			flags = DSISR_NOHPTE;
	} else if (page_found == -EPERM) {
		if (data) {
		/* Storage protection */
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
		u32 dsisr = vcpu->arch.fault_dsisr;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		} else {
		u64 msr = kvmppc_get_msr(vcpu);
			kvmppc_core_queue_inst_storage(vcpu, flags);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		}
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
@@ -1178,10 +1174,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
			r = RESUME_GUEST;
		} else {
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			kvmppc_core_queue_inst_storage(vcpu,
			msr |= shadow_srr1 & 0x58000000;
						shadow_srr1 & 0x58000000);
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
			r = RESUME_GUEST;
		}
		}
		break;
		break;
@@ -1220,9 +1214,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
		} else {
			kvmppc_set_dar(vcpu, dar);
			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
			kvmppc_set_dsisr(vcpu, fault_dsisr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
			r = RESUME_GUEST;
		}
		}
		break;
		break;