Commit c756ad03 authored by Paolo Bonzini

Merge tag 'kvm-s390-20140721' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

Bugfixes
--------
- add IPTE to trace event decoder
- document and advertise KVM_CAP_S390_IRQCHIP

Cleanups
--------
- Reuse kvm_vcpu_block for s390
- Get rid of tasklet for wakeup processing (see the sketch below)
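
The tasklet cleanup boils down to waking the vcpu straight from the
hrtimer callback: waking a waitqueue is safe in hardirq context, so the
softirq bounce through the tasklet was never needed. A condensed sketch
of the resulting path (it mirrors the interrupt.c hunks further down):

    void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
    {
            if (waitqueue_active(&vcpu->wq)) {
                    /* vcpu yielded voluntarily: good yield candidate */
                    vcpu->preempted = true;
                    wake_up_interruptible(&vcpu->wq);
            }
    }

    enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
    {
            struct kvm_vcpu *vcpu;

            vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
            /* no tasklet_schedule() any more: wake the vcpu directly */
            kvm_s390_vcpu_wakeup(vcpu);
            return HRTIMER_NORESTART;
    }
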
parents 6f43ed01 e59d120f
Documentation/virtual/kvm/api.txt +24 −3
@@ -2934,15 +2934,18 @@ The fields in each entry are defined as follows:
6. Capabilities that can be enabled
-----------------------------------

There are certain capabilities that change the behavior of the virtual CPU when
enabled. To enable them, please see section 4.37. Below you can find a list of
capabilities and what their effect on the vCPU is when enabling them.
There are certain capabilities that change the behavior of the virtual CPU or
the virtual machine when enabled. To enable them, please see section 4.37.
Below you can find a list of capabilities and what their effect on the vCPU or
the virtual machine is when enabling them.

The following information is provided along with the description:

  Architectures: which instruction set architectures provide this ioctl.
      x86 includes both i386 and x86_64.

  Target: whether this is a per-vcpu or per-vm capability.

  Parameters: what parameters are accepted by the capability.

  Returns: the return value.  General error numbers (EBADF, ENOMEM, EINVAL)
@@ -2952,6 +2955,7 @@ The following information is provided along with the description:
6.1 KVM_CAP_PPC_OSI

Architectures: ppc
Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error
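
As the new "Target" line says, this is a per-vcpu capability, so it is
enabled on the vcpu file descriptor. A minimal sketch (error handling
omitted; vcpu_fd is assumed to come from the usual KVM_CREATE_VCPU
call):

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  struct kvm_enable_cap cap = {
          .cap = KVM_CAP_PPC_OSI,         /* Target: vcpu */
  };

  ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);   /* 0 on success, -1 on error */

Per-vm capabilities (Target: vm) are enabled the same way, with the
ioctl issued on the vm file descriptor instead.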

@@ -2966,6 +2970,7 @@ When this capability is enabled, KVM_EXIT_OSI can occur.
6.2 KVM_CAP_PPC_PAPR

Architectures: ppc
Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error

@@ -2985,6 +2990,7 @@ When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
6.3 KVM_CAP_SW_TLB

Architectures: ppc
Target: vcpu
Parameters: args[0] is the address of a struct kvm_config_tlb
Returns: 0 on success; -1 on error

@@ -3027,6 +3033,7 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
6.4 KVM_CAP_S390_CSS_SUPPORT

Architectures: s390
Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error

@@ -3038,9 +3045,13 @@ handled in-kernel, while the other I/O instructions are passed to userspace.
When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
SUBCHANNEL intercepts.

Note that even though this capability is enabled per-vcpu, the complete
virtual machine is affected.
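
A minimal sketch of what the userspace run loop sees once the capability
is enabled (the field names come from the s390_tsch member of struct
kvm_run; the actual subchannel emulation is left out):

  struct kvm_run *run;    /* mmap'ed from the vcpu fd */

  ioctl(vcpu_fd, KVM_RUN, 0);
  if (run->exit_reason == KVM_EXIT_S390_TSCH) {
          /* subchannel the guest issued TEST SUBCHANNEL for */
          __u16 id = run->s390_tsch.subchannel_id;
          __u16 nr = run->s390_tsch.subchannel_nr;
          /* non-zero if an I/O interrupt was dequeued in-kernel */
          __u8 dequeued = run->s390_tsch.dequeued;
          /* ... emulate TSCH, store the IRB, set the cc ... */
  }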

6.5 KVM_CAP_PPC_EPR

Architectures: ppc
Target: vcpu
Parameters: args[0] defines whether the proxy facility is active
Returns: 0 on success; -1 on error

@@ -3066,7 +3077,17 @@ This capability connects the vcpu to an in-kernel MPIC device.
6.7 KVM_CAP_IRQ_XICS

Architectures: ppc
Target: vcpu
Parameters: args[0] is the XICS device fd
            args[1] is the XICS CPU number (server ID) for this vcpu

This capability connects the vcpu to an in-kernel XICS device.

6.8 KVM_CAP_S390_IRQCHIP

Architectures: s390
Target: vm
Parameters: none

This capability enables the in-kernel irqchip for s390. Please refer to
"4.24 KVM_CREATE_IRQCHIP" for details.
arch/s390/include/asm/kvm_host.h +0 −2
@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt {
	struct list_head list;
	atomic_t active;
	struct kvm_s390_float_interrupt *float_int;
	int timer_due; /* event indicator for waitqueue below */
	wait_queue_head_t *wq;
	atomic_t *cpuflags;
	unsigned int action_bits;
@@ -367,7 +366,6 @@ struct kvm_vcpu_arch {
	s390_fp_regs      guest_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer    ckc_timer;
	struct tasklet_struct tasklet;
	struct kvm_s390_pgm_info pgm;
	union  {
		struct cpuid	cpu_id;
arch/s390/include/asm/sie.h +1 −0
@@ -108,6 +108,7 @@
	exit_code_ipa0(0xB2, 0x17, "STETR"),	\
	exit_code_ipa0(0xB2, 0x18, "PC"),	\
	exit_code_ipa0(0xB2, 0x20, "SERVC"),	\
	exit_code_ipa0(0xB2, 0x21, "IPTE"),	\
	exit_code_ipa0(0xB2, 0x28, "PT"),	\
	exit_code_ipa0(0xB2, 0x29, "ISKE"),	\
	exit_code_ipa0(0xB2, 0x2a, "RRBE"),	\
arch/s390/kvm/interrupt.c +32 −68
@@ -544,13 +544,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		spin_lock(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
		spin_unlock(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
@@ -585,88 +585,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	__set_cpu_idle(vcpu);
	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current) &&
		!kvm_s390_si_ext_call_pending(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_tasklet(unsigned long parm)
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

	spin_lock(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->wq))
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
	spin_unlock(&vcpu->arch.local_int.lock);
	}
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	vcpu->preempted = true;
	tasklet_schedule(&vcpu->arch.tasklet);
	kvm_s390_vcpu_wakeup(vcpu);

	return HRTIMER_NORESTART;
}
@@ -676,13 +644,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;

	spin_lock_bh(&li->lock);
	spin_lock(&li->lock);
	list_for_each_entry_safe(inti, n, &li->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	atomic_set(&li->active, 0);
	spin_unlock_bh(&li->lock);
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -701,7 +669,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			spin_lock(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
@@ -712,7 +680,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			spin_unlock(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
@@ -758,7 +726,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			spin_lock(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
@@ -770,7 +738,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			spin_unlock(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
@@ -817,11 +785,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock_bh(&li->lock);
	spin_unlock(&li->lock);
	return 0;
}

@@ -842,11 +810,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,

	inti->type = KVM_S390_PROGRAM_INT;
	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
	spin_lock_bh(&li->lock);
	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock_bh(&li->lock);
	spin_unlock(&li->lock);
	return 0;
}

@@ -934,12 +902,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	spin_lock(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
	spin_unlock_bh(&li->lock);
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
@@ -1081,7 +1047,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	spin_lock(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
@@ -1090,11 +1056,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	vcpu->preempted = true;
	spin_unlock_bh(&li->lock);
	spin_unlock(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	kvm_s390_vcpu_wakeup(vcpu);
	return 0;
}

arch/s390/kvm/kvm-s390.c +10 −8
@@ -166,6 +166,7 @@ int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
@@ -649,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
@@ -1068,6 +1067,9 @@ retry:
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

@@ -1475,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
@@ -1501,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
	 * Let's play safe and flush the VCPU at startup.
	 */
	vcpu->arch.sie_block->ihcpu  = 0xffff;
	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

@@ -1515,17 +1517,17 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock_bh(&vcpu->arch.local_int.lock);
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	vcpu->arch.local_int.action_bits &=
				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

@@ -1544,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
